-rw-r--r--  MAINTAINERS                                          |    6
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig               |    1
-rw-r--r--  drivers/net/ethernet/freescale/Makefile              |    3
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig         |   19
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Makefile        |   14
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c         | 1526
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h         |  200
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_cbdr.c    |  110
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c |  137
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h      |  351
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c      |  831
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.h      |   32
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c      |  160
13 files changed, 3390 insertions(+), 0 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index d5d36cedf0b9..a0245fd1b09a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6023,6 +6023,12 @@ L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
 F:	drivers/dma/fsldma.*
 
+FREESCALE ENETC ETHERNET DRIVERS
+M:	Claudiu Manoil <claudiu.manoil@nxp.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/freescale/enetc/
+
 FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
 M:	Claudiu Manoil <claudiu.manoil@nxp.com>
 L:	netdev@vger.kernel.org
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index d3a62bc1f1c6..71793e03c3c8 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -97,5 +97,6 @@ config GIANFAR
 
 source "drivers/net/ethernet/freescale/dpaa/Kconfig"
 source "drivers/net/ethernet/freescale/dpaa2/Kconfig"
+source "drivers/net/ethernet/freescale/enetc/Kconfig"
 
 endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 3b4ff08e3841..6a93293d31e0 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -23,3 +23,6 @@ obj-$(CONFIG_FSL_FMAN) += fman/
 obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
 obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
+
+obj-$(CONFIG_FSL_ENETC) += enetc/
+obj-$(CONFIG_FSL_ENETC_VF) += enetc/
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
new file mode 100644
index 000000000000..f9dd26fbfc83
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC
+	tristate "ENETC PF driver"
+	depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+	help
+	  This driver supports NXP ENETC gigabit ethernet controller PCIe
+	  physical function (PF) devices, managing ENETC Ports at a privileged
+	  level.
+
+	  If compiled as module (M), the module name is fsl-enetc.
+
+config FSL_ENETC_VF
+	tristate "ENETC VF driver"
+	depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+	help
+	  This driver supports NXP ENETC gigabit ethernet controller PCIe
+	  virtual function (VF) devices enabled by the ENETC PF driver.
+
+	  If compiled as module (M), the module name is fsl-enetc-vf.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
new file mode 100644
index 000000000000..a85ef2b475fe
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
+fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o
+fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y)
+
+obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+
+ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy)
+fsl-enetc-vf-objs := enetc_vf.o
+else
+fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \
+				       enetc_ethtool.o
+fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y)
+endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
new file mode 100644
index 000000000000..f63be02cffae
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2017-2019 NXP */
+
+#include "enetc.h"
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/of_mdio.h>
+
+/* ENETC overhead: optional extension BD + 1 BD gap */
+#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
+/* max # of chained Tx BDs is 15, including head and extension BD */
+#define ENETC_MAX_SKB_FRAGS	13
+#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb);
+
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+	struct enetc_bdr *tx_ring;
+	int count;
+
+	tx_ring = priv->tx_ring[skb->queue_mapping];
+
+	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+		if (unlikely(skb_linearize(skb)))
+			goto drop_packet_err;
+
+	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+		netif_stop_subqueue(ndev, tx_ring->index);
+		return NETDEV_TX_BUSY;
+	}
+
+	count = enetc_map_tx_buffs(tx_ring, skb);
+	if (unlikely(!count))
+		goto drop_packet_err;
+
+	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+		netif_stop_subqueue(ndev, tx_ring->index);
+
+	return NETDEV_TX_OK;
+
+drop_packet_err:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
+{
+	int l3_start, l3_hsize;
+	u16 l3_flags, l4_flags;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return false;
+
+	switch (skb->csum_offset) {
+	case offsetof(struct tcphdr, check):
+		l4_flags = ENETC_TXBD_L4_TCP;
+		break;
+	case offsetof(struct udphdr, check):
+		l4_flags = ENETC_TXBD_L4_UDP;
+		break;
+	default:
+		skb_checksum_help(skb);
+		return false;
+	}
+
+	l3_start = skb_network_offset(skb);
+	l3_hsize = skb_network_header_len(skb);
+
+	l3_flags = 0;
+	if (skb->protocol == htons(ETH_P_IPV6))
+		l3_flags = ENETC_TXBD_L3_IPV6;
+
+	/* write BD fields */
+	txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
+	txbd->l4_csoff = l4_flags;
+
+	return true;
+}
+
+static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
+				struct enetc_tx_swbd *tx_swbd)
+{
+	if (tx_swbd->is_dma_page)
+		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
+			       tx_swbd->len, DMA_TO_DEVICE);
+	else
+		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
+				 tx_swbd->len, DMA_TO_DEVICE);
+	tx_swbd->dma = 0;
+}
+
+static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
+			      struct enetc_tx_swbd *tx_swbd)
+{
+	if (tx_swbd->dma)
+		enetc_unmap_tx_buff(tx_ring, tx_swbd);
+
+	if (tx_swbd->skb) {
+		dev_kfree_skb_any(tx_swbd->skb);
+		tx_swbd->skb = NULL;
+	}
+}
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+	struct enetc_tx_swbd *tx_swbd;
+	struct skb_frag_struct *frag;
+	int len = skb_headlen(skb);
+	union enetc_tx_bd temp_bd;
+	union enetc_tx_bd *txbd;
+	bool do_vlan, do_tstamp;
+	int i, count = 0;
+	unsigned int f;
+	dma_addr_t dma;
+	u8 flags = 0;
+
+	i = tx_ring->next_to_use;
+	txbd = ENETC_TXBD(*tx_ring, i);
+	prefetchw(txbd);
+
+	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
+		goto dma_err;
+
+	temp_bd.addr = cpu_to_le64(dma);
+	temp_bd.buf_len = cpu_to_le16(len);
+	temp_bd.lstatus = 0;
+
+	tx_swbd = &tx_ring->tx_swbd[i];
+	tx_swbd->dma = dma;
+	tx_swbd->len = len;
+	tx_swbd->is_dma_page = 0;
+	count++;
+
+	do_vlan = skb_vlan_tag_present(skb);
+	do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+
+	if (do_vlan || do_tstamp)
+		flags |= ENETC_TXBD_FLAGS_EX;
+
+	if (enetc_tx_csum(skb, &temp_bd))
+		flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
+
+	/* first BD needs frm_len and offload flags set */
+	temp_bd.frm_len = cpu_to_le16(skb->len);
+	temp_bd.flags = flags;
+
+	if (flags & ENETC_TXBD_FLAGS_EX) {
+		u8 e_flags = 0;
+		*txbd = temp_bd;
+		enetc_clear_tx_bd(&temp_bd);
+
+		/* add extension BD for VLAN and/or timestamping */
+		flags = 0;
+		tx_swbd++;
+		txbd++;
+		i++;
+		if (unlikely(i == tx_ring->bd_count)) {
+			i = 0;
+			tx_swbd = tx_ring->tx_swbd;
+			txbd = ENETC_TXBD(*tx_ring, 0);
+		}
+		prefetchw(txbd);
+
+		if (do_vlan) {
+			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+			temp_bd.ext.tpid = 0; /* < C-TAG */
+			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+		}
+
+		if (do_tstamp) {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
+		}
+
+		temp_bd.ext.e_flags = e_flags;
+		count++;
+	}
+
+	frag = &skb_shinfo(skb)->frags[0];
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+		len = skb_frag_size(frag);
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_err;
+
+		*txbd = temp_bd;
+		enetc_clear_tx_bd(&temp_bd);
+
+		flags = 0;
+		tx_swbd++;
+		txbd++;
+		i++;
+		if (unlikely(i == tx_ring->bd_count)) {
+			i = 0;
+			tx_swbd = tx_ring->tx_swbd;
+			txbd = ENETC_TXBD(*tx_ring, 0);
+		}
+		prefetchw(txbd);
+
+		temp_bd.addr = cpu_to_le64(dma);
+		temp_bd.buf_len = cpu_to_le16(len);
+
+		tx_swbd->dma = dma;
+		tx_swbd->len = len;
+		tx_swbd->is_dma_page = 1;
+		count++;
+	}
+
+	/* last BD needs 'F' bit set */
+	flags |= ENETC_TXBD_FLAGS_F;
+	temp_bd.flags = flags;
+	*txbd = temp_bd;
+
+	tx_ring->tx_swbd[i].skb = skb;
+
+	enetc_bdr_idx_inc(tx_ring, &i);
+	tx_ring->next_to_use = i;
+
+	/* let H/W know BD ring has been updated */
+	enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
+
+	return count;
+
+dma_err:
+	dev_err(tx_ring->dev, "DMA map error");
+
+	do {
+		tx_swbd = &tx_ring->tx_swbd[i];
+		enetc_free_tx_skb(tx_ring, tx_swbd);
+		if (i == 0)
+			i = tx_ring->bd_count;
+		i--;
+	} while (count--);
+
+	return 0;
+}
+
+static irqreturn_t enetc_msix(int irq, void *data)
+{
+	struct enetc_int_vector *v = data;
+	int i;
+
+	/* disable interrupts */
+	enetc_wr_reg(v->rbier, 0);
+
+	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
+
+	napi_schedule_irqoff(&v->napi);
+
+	return IRQ_HANDLED;
+}
+
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+			       struct napi_struct *napi, int work_limit);
+
+static int enetc_poll(struct napi_struct *napi, int budget)
+{
+	struct enetc_int_vector
+		*v = container_of(napi, struct enetc_int_vector, napi);
+	bool complete = true;
+	int work_done;
+	int i;
+
+	for (i = 0; i < v->count_tx_rings; i++)
+		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+			complete = false;
+
+	work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
+	if (work_done == budget)
+		complete = false;
+
+	if (!complete)
+		return budget;
+
+	napi_complete_done(napi, work_done);
+
+	/* enable interrupts */
+	enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
+
+	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
+			     ENETC_TBIER_TXTIE);
+
+	return work_done;
+}
+
+static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
+{
+	int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+
+	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+}
+
+static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+{
+	struct net_device *ndev = tx_ring->ndev;
+	int tx_frm_cnt = 0, tx_byte_cnt = 0;
+	struct enetc_tx_swbd *tx_swbd;
+	int i, bds_to_clean;
+
+	i = tx_ring->next_to_clean;
+	tx_swbd = &tx_ring->tx_swbd[i];
+	bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+
+	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
+		bool is_eof = !!tx_swbd->skb;
+
+		enetc_unmap_tx_buff(tx_ring, tx_swbd);
+		if (is_eof) {
+			napi_consume_skb(tx_swbd->skb, napi_budget);
+			tx_swbd->skb = NULL;
+		}
+
+		tx_byte_cnt += tx_swbd->len;
+
+		bds_to_clean--;
+		tx_swbd++;
+		i++;
+		if (unlikely(i == tx_ring->bd_count)) {
+			i = 0;
+			tx_swbd = tx_ring->tx_swbd;
+		}
+
+		/* BD iteration loop end */
+		if (is_eof) {
+			tx_frm_cnt++;
+			/* re-arm interrupt source */
+			enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
+				     BIT(16 + tx_ring->index));
+		}
+
+		if (unlikely(!bds_to_clean))
+			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+	}
+
+	tx_ring->next_to_clean = i;
+	tx_ring->stats.packets += tx_frm_cnt;
+	tx_ring->stats.bytes += tx_byte_cnt;
+
+	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
+		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
+		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+		netif_wake_subqueue(ndev, tx_ring->index);
+	}
+
+	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
+}
+
+static bool enetc_new_page(struct enetc_bdr *rx_ring,
+			   struct enetc_rx_swbd *rx_swbd)
+{
+	struct page *page;
+	dma_addr_t addr;
+
+	page = dev_alloc_page();
+	if (unlikely(!page))
+		return false;
+
+	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
+		__free_page(page);
+
+		return false;
+	}
+
+	rx_swbd->dma = addr;
+	rx_swbd->page = page;
+	rx_swbd->page_offset = ENETC_RXB_PAD;
+
+	return true;
+}
+
+static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
+{
+	struct enetc_rx_swbd *rx_swbd;
+	union enetc_rx_bd *rxbd;
+	int i, j;
+
+	i = rx_ring->next_to_use;
+	rx_swbd = &rx_ring->rx_swbd[i];
+	rxbd = ENETC_RXBD(*rx_ring, i);
+
+	for (j = 0; j < buff_cnt; j++) {
+		/* try reuse page */
+		if (unlikely(!rx_swbd->page)) {
+			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
+				rx_ring->stats.rx_alloc_errs++;
+				break;
+			}
+		}
+
+		/* update RxBD */
+		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
+					   rx_swbd->page_offset);
+		/* clear 'R" as well */
+		rxbd->r.lstatus = 0;
+
+		rx_swbd++;
+		rxbd++;
+		i++;
+		if (unlikely(i == rx_ring->bd_count)) {
+			i = 0;
+			rx_swbd = rx_ring->rx_swbd;
+			rxbd = ENETC_RXBD(*rx_ring, 0);
+		}
+	}
+
+	if (likely(j)) {
+		rx_ring->next_to_alloc = i; /* keep track from page reuse */
+		rx_ring->next_to_use = i;
+		/* update ENETC's consumer index */
+		enetc_wr_reg(rx_ring->rcir, i);
+	}
+
+	return j;
+}
+
+static void enetc_get_offloads(struct enetc_bdr *rx_ring,
+			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
+{
+	/* TODO: add tstamp, hashing */
+	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
+		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
+
+		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+
+	/* copy VLAN to skb, if one is extracted, for now we assume it's a
+	 * standard TPID, but HW also supports custom values
+	 */
+	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       le16_to_cpu(rxbd->r.vlan_opt));
+}
+
+static void enetc_process_skb(struct enetc_bdr *rx_ring,
+			      struct sk_buff *skb)
+{
+	skb_record_rx_queue(skb, rx_ring->index);
+	skb->protocol = eth_type_trans(skb, rx_ring->ndev);
+}
+
+static bool enetc_page_reusable(struct page *page)
+{
+	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
+}
+
+static void enetc_reuse_page(struct enetc_bdr *rx_ring,
+			     struct enetc_rx_swbd *old)
+{
+	struct enetc_rx_swbd *new;
+
+	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
+
+	/* next buf that may reuse a page */
+	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
+
+	/* copy page reference */
+	*new = *old;
+}
+
+static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
+					       int i, u16 size)
+{
+	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
+				      rx_swbd->page_offset,
+				      size, DMA_FROM_DEVICE);
+	return rx_swbd;
+}
+
+static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
+			      struct enetc_rx_swbd *rx_swbd)
+{
+	if (likely(enetc_page_reusable(rx_swbd->page))) {
+		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
+		page_ref_inc(rx_swbd->page);
+
+		enetc_reuse_page(rx_ring, rx_swbd);
+
+		/* sync for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
+						 rx_swbd->page_offset,
+						 ENETC_RXB_DMA_SIZE,
+						 DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+	}
+
+	rx_swbd->page = NULL;
+}
+
+static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
+						int i, u16 size)
+{
+	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+	struct sk_buff *skb;
+	void *ba;
+
+	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
+	skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
+	if (unlikely(!skb)) {
+		rx_ring->stats.rx_alloc_errs++;
+		return NULL;
+	}
+
+	skb_reserve(skb, ENETC_RXB_PAD);
+	__skb_put(skb, size);
+
+	enetc_put_rx_buff(rx_ring, rx_swbd);
+
+	return skb;
+}
+
+static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
+				     u16 size, struct sk_buff *skb)
+{
+	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
+			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
+
+	enetc_put_rx_buff(rx_ring, rx_swbd);
+}
+
+#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
+
+static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+			       struct napi_struct *napi, int work_limit)
+{
+	int rx_frm_cnt = 0, rx_byte_cnt = 0;
+	int cleaned_cnt, i;
+
+	cleaned_cnt = enetc_bd_unused(rx_ring);
+	/* next descriptor to process */
+	i = rx_ring->next_to_clean;
+
+	while (likely(rx_frm_cnt < work_limit)) {
+		union enetc_rx_bd *rxbd;
+		struct sk_buff *skb;
+		u32 bd_status;
+		u16 size;
+
+		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
+			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+
+			cleaned_cnt -= count;
+		}
+
+		rxbd = ENETC_RXBD(*rx_ring, i);
+		bd_status = le32_to_cpu(rxbd->r.lstatus);
+		if (!bd_status)
+			break;
+
+		enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
+		dma_rmb(); /* for reading other rxbd fields */
+		size = le16_to_cpu(rxbd->r.buf_len);
+		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+		if (!skb)
+			break;
+
+		enetc_get_offloads(rx_ring, rxbd, skb);
+
+		cleaned_cnt++;
+		rxbd++;
+		i++;
+		if (unlikely(i == rx_ring->bd_count)) {
+			i = 0;
+			rxbd = ENETC_RXBD(*rx_ring, 0);
+		}
+
+		if (unlikely(bd_status &
+			     ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
+			dev_kfree_skb(skb);
+			while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+				dma_rmb();
+				bd_status = le32_to_cpu(rxbd->r.lstatus);
+				rxbd++;
+				i++;
+				if (unlikely(i == rx_ring->bd_count)) {
+					i = 0;
+					rxbd = ENETC_RXBD(*rx_ring, 0);
+				}
+			}
+
+			rx_ring->ndev->stats.rx_dropped++;
+			rx_ring->ndev->stats.rx_errors++;
+
+			break;
+		}
+
+		/* not last BD in frame? */
+		while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+			bd_status = le32_to_cpu(rxbd->r.lstatus);
+			size = ENETC_RXB_DMA_SIZE;
+
+			if (bd_status & ENETC_RXBD_LSTATUS_F) {
+				dma_rmb();
+				size = le16_to_cpu(rxbd->r.buf_len);
+			}
+
+			enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
+
+			cleaned_cnt++;
+			rxbd++;
+			i++;
+			if (unlikely(i == rx_ring->bd_count)) {
+				i = 0;
+				rxbd = ENETC_RXBD(*rx_ring, 0);
+			}
+		}
+
+		rx_byte_cnt += skb->len;
+
+		enetc_process_skb(rx_ring, skb);
+
+		napi_gro_receive(napi, skb);
+
+		rx_frm_cnt++;
+	}
+
+	rx_ring->next_to_clean = i;
+
+	rx_ring->stats.packets += rx_frm_cnt;
+	rx_ring->stats.bytes += rx_byte_cnt;
+
+	return rx_frm_cnt;
+}
+
+/* Probing and Init */
+void enetc_get_si_caps(struct enetc_si *si)
+{
+	struct enetc_hw *hw = &si->hw;
+	u32 val;
+
+	/* find out how many of various resources we have to work with */
+	val = enetc_rd(hw, ENETC_SICAPR0);
+	si->num_rx_rings = (val >> 16) & 0xff;
+	si->num_tx_rings = val & 0xff;
+}
+
+static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+{
+	r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
+					&r->bd_dma_base, GFP_KERNEL);
+	if (!r->bd_base)
+		return -ENOMEM;
+
+	/* h/w requires 128B alignment */
+	if (!IS_ALIGNED(r->bd_dma_base, 128)) {
+		dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
+				  r->bd_dma_base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int enetc_alloc_txbdr(struct enetc_bdr *txr)
+{
+	int err;
+
+	txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
+	if (!txr->tx_swbd)
+		return -ENOMEM;
+
+	err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
+	if (err) {
+		vfree(txr->tx_swbd);
+		return err;
+	}
+
+	txr->next_to_clean = 0;
+	txr->next_to_use = 0;
+
+	return 0;
+}
+
+static void enetc_free_txbdr(struct enetc_bdr *txr)
+{
+	int size, i;
+
+	for (i = 0; i < txr->bd_count; i++)
+		enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
+
+	size = txr->bd_count * sizeof(union enetc_tx_bd);
+
+	dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
+	txr->bd_base = NULL;
+
+	vfree(txr->tx_swbd);
+	txr->tx_swbd = NULL;
+}
+
+static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
+{
+	int i, err;
+
+	for (i = 0; i < priv->num_tx_rings; i++) {
+		err = enetc_alloc_txbdr(priv->tx_ring[i]);
+
+		if (err)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	while (i-- > 0)
+		enetc_free_txbdr(priv->tx_ring[i]);
+
+	return err;
+}
+
+static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_free_txbdr(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
+{
+	int err;
+
+	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
+	if (!rxr->rx_swbd)
+		return -ENOMEM;
+
+	err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
+	if (err) {
+		vfree(rxr->rx_swbd);
+		return err;
+	}
+
+	rxr->next_to_clean = 0;
+	rxr->next_to_use = 0;
+	rxr->next_to_alloc = 0;
+
+	return 0;
+}
+
+static void enetc_free_rxbdr(struct enetc_bdr *rxr)
+{
+	int size;
+
+	size = rxr->bd_count * sizeof(union enetc_rx_bd);
+
+	dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
+	rxr->bd_base = NULL;
+
+	vfree(rxr->rx_swbd);
+	rxr->rx_swbd = NULL;
+}
+
+static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
+{
+	int i, err;
+
+	for (i = 0; i < priv->num_rx_rings; i++) {
+		err = enetc_alloc_rxbdr(priv->rx_ring[i]);
+
+		if (err)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	while (i-- > 0)
+		enetc_free_rxbdr(priv->rx_ring[i]);
+
+	return err;
+}
+
+static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_free_rxbdr(priv->rx_ring[i]);
+}
+
+static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+{
+	int i;
+
+	if (!tx_ring->tx_swbd)
+		return;
+
+	for (i = 0; i < tx_ring->bd_count; i++) {
+		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+		enetc_free_tx_skb(tx_ring, tx_swbd);
+	}
+
+	tx_ring->next_to_clean = 0;
+	tx_ring->next_to_use = 0;
+}
+
+static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
+{
+	int i;
+
+	if (!rx_ring->rx_swbd)
+		return;
+
+	for (i = 0; i < rx_ring->bd_count; i++) {
+		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
+
+		if (!rx_swbd->page)
+			continue;
+
+		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_page(rx_swbd->page);
+		rx_swbd->page = NULL;
+	}
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	rx_ring->next_to_alloc = 0;
+}
+
+static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->num_rx_rings; i++)
+		enetc_free_rx_ring(priv->rx_ring[i]);
+
+	for (i = 0; i < priv->num_tx_rings; i++)
+		enetc_free_tx_ring(priv->tx_ring[i]);
+}
+
+static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+{
+	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
+					   GFP_KERNEL);
+	if (!cbdr->bd_base)
+		return -ENOMEM;
+
+	/* h/w requires 128B alignment */
+	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
+		dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
+		return -EINVAL;
+	}
+
+	cbdr->next_to_clean = 0;
+	cbdr->next_to_use = 0;
+
+	return 0;
+}
+
+static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+{
+	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+	dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
+	cbdr->bd_base = NULL;
+}
+
+static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
+{
+	/* set CBDR cache attributes */
+	enetc_wr(hw, ENETC_SICAR2,
+		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
+	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
+	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
+
+	enetc_wr(hw, ENETC_SICBDRPIR, 0);
+	enetc_wr(hw, ENETC_SICBDRCIR, 0);
+
+	/* enable ring */
+	enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
+
+	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
+	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
+}
+
+static void enetc_clear_cbdr(struct enetc_hw *hw)
+{
+	enetc_wr(hw, ENETC_SICBDRMR, 0);
+}
+
+static int enetc_configure_si(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+	struct enetc_hw *hw = &si->hw;
+
+	enetc_setup_cbdr(hw, &si->cbd_ring);
+	/* set SI cache attributes */
+	enetc_wr(hw, ENETC_SICAR0,
+		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+	enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
+	/* enable SI */
+	enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+
+	return 0;
+}
+
+void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+	int cpus = num_online_cpus();
+
+	priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+	priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+
+	/* Enable all available TX rings in order to configure as many
+	 * priorities as possible, when needed.
+	 * TODO: Make # of TX rings run-time configurable
+	 */
+	priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
+	priv->num_tx_rings = si->num_tx_rings;
+	priv->bdr_int_num = cpus;
+
+	/* SI specific */
+	si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
+}
+
+int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+	int err;
+
+	err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
+	if (err)
+		return err;
+
+	err = enetc_configure_si(priv);
+	if (err)
+		goto err_config_si;
+
+	return 0;
+
+err_config_si:
+	enetc_clear_cbdr(&si->hw);
+	enetc_free_cbdr(priv->dev, &si->cbd_ring);
+
+	return err;
+}
+
+void enetc_free_si_resources(struct enetc_ndev_priv *priv)
+{
+	struct enetc_si *si = priv->si;
+
+	enetc_clear_cbdr(&si->hw);
+	enetc_free_cbdr(priv->dev, &si->cbd_ring);
+}
+
+static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+	int idx = tx_ring->index;
+	u32 tbmr;
+
+	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
+		       lower_32_bits(tx_ring->bd_dma_base));
+
+	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
+		       upper_32_bits(tx_ring->bd_dma_base));
+
+	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
+	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
+		       ENETC_RTBLENR_LEN(tx_ring->bd_count));
+
+	/* clearing PI/CI registers for Tx not supported, adjust sw indexes */
+	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
+	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
+
+	/* enable Tx ints by setting pkt thr to 1 */
+	enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);
+
+	tbmr = ENETC_TBMR_EN;
+	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+		tbmr |= ENETC_TBMR_VIH;
+
+	/* enable ring */
+	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+
+	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
+	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
+	tx_ring->idr = hw->reg + ENETC_SITXIDR;
+}
+
+static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+	int idx = rx_ring->index;
+	u32 rbmr;
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
+		       lower_32_bits(rx_ring->bd_dma_base));
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
+		       upper_32_bits(rx_ring->bd_dma_base));
+
+	WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
+	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
+		       ENETC_RTBLENR_LEN(rx_ring->bd_count));
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+
+	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+
+	/* enable Rx ints by setting pkt thr to 1 */
+	enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
+
+	rbmr = ENETC_RBMR_EN;
+	if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		rbmr |= ENETC_RBMR_VTE;
+
+	rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
+	rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+
+	enetc_refill_rx_ring(