// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>

#include "lan966x_main.h"
static int lan966x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				       u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;
	struct lan966x_rx *rx = &lan966x->rx;
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	rx->page[dcb][db] = page;
	*dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return 0;
}
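
/* FDMA library callback for the TX channel: the buffer behind this DCB was
 * already DMA mapped elsewhere, so simply report the stored address.
 */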
static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				       u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;

	return 0;
}
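
/* Same as above, but for XDP_TX frames: the frame data starts
 * XDP_PACKET_HEADROOM bytes into the mapped buffer, so offset the reported
 * address accordingly.
 */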
static int lan966x_fdma_xdp_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
					   u64 *dataptr)
{
	struct lan966x *lan966x = (struct lan966x *)fdma->priv;

	*dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;

	return 0;
}
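
/* Read the bitmask of currently active FDMA channels. */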
static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}
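
/* Give all pages attached to the RX DCB ring back to the page pool. */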
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	int i, j;

	for (i = 0; i < fdma->n_dcbs; ++i) {
		for (j = 0; j < fdma->n_dbs; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}
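
/* Recycle only the page behind the DCB/DB slot that the FDMA state
 * currently points at, typically when the received frame is dropped.
 */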
static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct page *page;

	page = rx->page[fdma->dcb_index][fdma->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}
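
/* Create the page pool that backs the RX ring. The pool performs the DMA
 * mapping and device syncing itself; buffers are mapped bidirectionally
 * when an XDP program is present so the same pages can also be transmitted
 * from. The pool is then registered as the memory model of each port's
 * xdp_rxq.
 */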
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = rx->fdma.n_dcbs,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return PTR_ERR_OR_ZERO(rx->page_pool);
}
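
/* Allocate all RX resources: the page pool, the coherent DCB/DB ring and
 * the initial DCB contents (data buffer size and interrupt flag).
 */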
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	int err;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	err = fdma_alloc_coherent(lan966x->dev, fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}
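
/* Bring up the RX (extraction) channel: point the hardware at the DCB list,
 * configure the channel, un-stall extraction, enable the per-channel data
 * block interrupt and finally activate the channel.
 */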
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma = &rx->fdma;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated.
	 */
	lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP(fdma->channel_id));
	lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
	       FDMA_DCB_LLP1(fdma->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(fdma->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(fdma->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(fdma->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}
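
/* Stop the RX (extraction) channel and tear down its resources. */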
static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct fdma *fdma =