| author | Bo Jiao <Bo.Jiao@mediatek.com> | 2023-10-20 12:30:57 +0200 |
|---|---|---|
| committer | Felix Fietkau <nbd@nbd.name> | 2023-12-07 18:50:20 +0100 |
| commit | 950d0abb5cd94f2b0710c5c42ac4398c91a7ff22 | |
| tree | 5e7fa426479d889e35314e313b01358318f1d27e | |
| parent | b8b36f47070f47dbfd3dc8eb0b674d6103306935 | |
wifi: mt76: mt7996: add wed rx support
Similar to MT7915, enable the Wireless Ethernet Dispatcher (WED) for
MT7996 to offload traffic received from the WLAN NIC and transmitted
on the LAN one.
Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
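For orientation before the diff: a minimal sketch (not part of this patch) of the queue tagging it performs in mt7996_dma_init()/mt7996_dma_rro_init(), using only the flag macros and helpers the patch introduces; the surrounding mdev/ring setup is assumed.

```c
/* Illustrative sketch only -- not the patch itself. An RX ring is
 * marked as a WED RRO data ring via q->flags; mt76_dma_wed_setup()
 * then routes it by its WED type.
 */
struct mt76_queue *q = &mdev->q_rx[MT_RXQ_RRO_BAND0];

q->flags = MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN; /* RRO data ring 0 */
q->wed = &mdev->mmio.wed;                               /* owning WED device */

/* mt76_queue_is_wed_rro_data(q) is now true, so the normal dequeue
 * path backs off and the WED hardware consumes this ring instead.
 */
```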
Diffstat (limited to 'drivers/net')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/dma.c | 149 |
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/dma.h | 43 |
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/mt76.h | 50 |
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/mt7996/dma.c | 198 |
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/mt7996/init.c | 175 |
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 41 |
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/mt7996/mcu.c | 24 |
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/mt7996/mcu.h | 2 |
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/mt7996/mmio.c | 60 |
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h | 55 |
| -rw-r--r-- | drivers/net/wireless/mediatek/mt76/mt7996/regs.h | 62 |
11 files changed, 772 insertions(+), 87 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 12615b49c0d7..7c76afdaef68 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -189,7 +189,10 @@ static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 {
     Q_WRITE(q, desc_base, q->desc_dma);
-    Q_WRITE(q, ring_size, q->ndesc);
+    if (q->flags & MT_QFLAG_WED_RRO_EN)
+        Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
+    else
+        Q_WRITE(q, ring_size, q->ndesc);
     q->head = Q_READ(q, dma_idx);
     q->tail = q->head;
 }
@@ -198,14 +201,16 @@ static void
 __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
                        bool reset_idx)
 {
-    int i;
-
     if (!q || !q->ndesc)
         return;
 
-    /* clear descriptors */
-    for (i = 0; i < q->ndesc; i++)
-        q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+    if (!mt76_queue_is_wed_rro_ind(q)) {
+        int i;
+
+        /* clear descriptors */
+        for (i = 0; i < q->ndesc; i++)
+            q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+    }
 
     if (reset_idx) {
         Q_WRITE(q, cpu_idx, 0);
@@ -224,13 +229,22 @@ static int
 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
                     struct mt76_queue_buf *buf, void *data)
 {
-    struct mt76_desc *desc = &q->desc[q->head];
     struct mt76_queue_entry *entry = &q->entry[q->head];
     struct mt76_txwi_cache *txwi = NULL;
+    struct mt76_desc *desc;
     u32 buf1 = 0, ctrl;
     int idx = q->head;
     int rx_token;
 
+    if (mt76_queue_is_wed_rro_ind(q)) {
+        struct mt76_wed_rro_desc *rro_desc;
+
+        rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+        data = &rro_desc[q->head];
+        goto done;
+    }
+
+    desc = &q->desc[q->head];
     ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 
     if (mt76_queue_is_wed_rx(q)) {
@@ -253,6 +267,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
     WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
     WRITE_ONCE(desc->info, 0);
 
+done:
     entry->dma_addr[0] = buf->addr;
     entry->dma_len[0] = buf->len;
     entry->txwi = txwi;
@@ -401,19 +416,26 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 {
     struct mt76_queue_entry *e = &q->entry[idx];
     struct mt76_desc *desc = &q->desc[idx];
-    void *buf;
+    u32 ctrl, desc_info, buf1;
+    void *buf = e->buf;
+
+    if (mt76_queue_is_wed_rro_ind(q))
+        goto done;
 
+    ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
     if (len) {
-        u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
         *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
         *more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
     }
 
+    desc_info = le32_to_cpu(desc->info);
     if (info)
-        *info = le32_to_cpu(desc->info);
+        *info = desc_info;
+
+    buf1 = le32_to_cpu(desc->buf1);
+    mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);
 
     if (mt76_queue_is_wed_rx(q)) {
-        u32 buf1 = le32_to_cpu(desc->buf1);
         u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
         struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
 
@@ -429,23 +451,16 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 
         t->ptr = NULL;
         mt76_put_rxwi(dev, t);
-
-        if (drop) {
-            u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
-
-            *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
-                               MT_DMA_CTL_DROP));
-
+        if (drop)
             *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
-        }
     } else {
-        buf = e->buf;
-        e->buf = NULL;
         dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
                                 SKB_WITH_OVERHEAD(q->buf_size),
                                 page_pool_get_dma_dir(q->page_pool));
     }
 
+done:
+    e->buf = NULL;
     return buf;
 }
 
@@ -459,11 +474,16 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
     if (!q->queued)
         return NULL;
 
-    if (flush)
-        q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
-    else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+    if (mt76_queue_is_wed_rro_data(q))
         return NULL;
 
+    if (!mt76_queue_is_wed_rro_ind(q)) {
+        if (flush)
+            q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+        else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+            return NULL;
+    }
+
     q->tail = (q->tail + 1) % q->ndesc;
     q->queued--;
 
@@ -615,11 +635,14 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
     spin_lock_bh(&q->lock);
 
     while (q->queued < q->ndesc - 1) {
+        struct mt76_queue_buf qbuf = {};
         enum dma_data_direction dir;
-        struct mt76_queue_buf qbuf;
         dma_addr_t addr;
         int offset;
-        void *buf;
+        void *buf = NULL;
+
+        if (mt76_queue_is_wed_rro_ind(q))
+            goto done;
 
         buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
         if (!buf)
@@ -630,6 +653,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
         dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
 
         qbuf.addr = addr + q->buf_offset;
+done:
         qbuf.len = len - q->buf_offset;
         qbuf.skip_unmap = false;
         if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
@@ -639,7 +663,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
         frames++;
     }
 
-    if (frames)
+    if (frames || mt76_queue_is_wed_rx(q))
         mt76_dma_kick_queue(dev, q);
 
     spin_unlock_bh(&q->lock);
@@ -650,8 +674,8 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 {
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
-    int ret, type, ring;
-    u8 flags;
+    int ret = 0, type, ring;
+    u16 flags;
 
     if (!q || !q->ndesc)
         return -EINVAL;
@@ -678,7 +702,6 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
         q->flags = 0;
         mt76_dma_queue_reset(dev, q);
         mt76_dma_rx_fill(dev, q, false);
-        q->flags = flags;
 
         ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
         if (!ret)
@@ -690,9 +713,31 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
         if (!ret)
             q->wed_regs = q->wed->rx_ring[ring].reg_base;
         break;
+    case MT76_WED_RRO_Q_DATA:
+        q->flags &= ~MT_QFLAG_WED;
+        __mt76_dma_queue_reset(dev, q, false);
+        mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
+        q->head = q->ndesc - 1;
+        q->queued = q->head;
+        break;
+    case MT76_WED_RRO_Q_MSDU_PG:
+        q->flags &= ~MT_QFLAG_WED;
+        __mt76_dma_queue_reset(dev, q, false);
+        mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
+        q->head = q->ndesc - 1;
+        q->queued = q->head;
+        break;
+    case MT76_WED_RRO_Q_IND:
+        q->flags &= ~MT_QFLAG_WED;
+        mt76_dma_queue_reset(dev, q);
+        mt76_dma_rx_fill(dev, q, false);
+        mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
+        break;
     default:
         ret = -EINVAL;
+        break;
     }
+    q->flags = flags;
 
     return ret;
 #else
@@ -716,11 +761,26 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
     q->buf_size = bufsize;
     q->hw_idx = idx;
 
-    size = q->ndesc * sizeof(struct mt76_desc);
-    q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
+    size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
+                                        : sizeof(struct mt76_desc);
+    q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
+                                  &q->desc_dma, GFP_KERNEL);
     if (!q->desc)
         return -ENOMEM;
 
+    if (mt76_queue_is_wed_rro_ind(q)) {
+        struct mt76_wed_rro_desc *rro_desc;
+        int i;
+
+        rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+        for (i = 0; i < q->ndesc; i++) {
+            struct mt76_wed_rro_ind *cmd;
+
+            cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
+            cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
+        }
+    }
+
     size = q->ndesc * sizeof(*q->entry);
     q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
     if (!q->entry)
@@ -734,8 +794,13 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
     if (ret)
         return ret;
 
-    if (!mt76_queue_is_wed_tx_free(q))
-        mt76_dma_queue_reset(dev, q);
+    if (mtk_wed_device_active(&dev->mmio.wed)) {
+        if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
+            mt76_queue_is_wed_tx_free(q))
+            return 0;
+    }
+
+    mt76_dma_queue_reset(dev, q);
 
     return 0;
 }
@@ -757,7 +822,8 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
         if (!buf)
             break;
 
-        mt76_put_page_pool_buf(buf, false);
+        if (!mt76_queue_is_wed_rro(q))
+            mt76_put_page_pool_buf(buf, false);
     } while (1);
 
     spin_lock_bh(&q->lock);
@@ -773,13 +839,16 @@ static void
 mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 {
     struct mt76_queue *q = &dev->q_rx[qid];
-    int i;
 
     if (!q->ndesc)
         return;
 
-    for (i = 0; i < q->ndesc; i++)
-        q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+    if (!mt76_queue_is_wed_rro_ind(q)) {
+        int i;
+
+        for (i = 0; i < q->ndesc; i++)
+            q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+    }
 
     mt76_dma_rx_cleanup(dev, q);
 
@@ -991,6 +1060,10 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
     mt76_for_each_q_rx(dev, i) {
         struct mt76_queue *q = &dev->q_rx[i];
 
+        if (mtk_wed_device_active(&dev->mmio.wed) &&
+            mt76_queue_is_wed_rro(q))
+            continue;
+
         netif_napi_del(&dev->napi[i]);
         mt76_dma_rx_cleanup(dev, q);
 
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 1b090d78cd05..e549e678b69f 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -23,8 +23,17 @@
 #define MT_DMA_PPE_CPU_REASON       GENMASK(15, 11)
 #define MT_DMA_PPE_ENTRY            GENMASK(30, 16)
+#define MT_DMA_INFO_DMA_FRAG        BIT(9)
 #define MT_DMA_INFO_PPE_VLD         BIT(31)
 
+#define MT_DMA_CTL_PN_CHK_FAIL      BIT(13)
+#define MT_DMA_CTL_VER_MASK         BIT(7)
+
+#define MT_DMA_RRO_EN               BIT(13)
+
+#define MT_DMA_WED_IND_CMD_CNT      8
+#define MT_DMA_WED_IND_REASON       GENMASK(15, 12)
+
 #define MT_DMA_HDR_LEN              4
 #define MT_RX_INFO_LEN              4
 #define MT_FCE_INFO_LEN             4
@@ -37,6 +46,11 @@ struct mt76_desc {
     __le32 info;
 } __packed __aligned(4);
 
+struct mt76_wed_rro_desc {
+    __le32 buf0;
+    __le32 buf1;
+} __packed __aligned(4);
+
 enum mt76_qsel {
     MT_QSEL_MGMT,
     MT_QSEL_HCCA,
@@ -54,9 +68,38 @@ enum mt76_mcu_evt_type {
     EVT_EVENT_DFS_DETECT_RSP,
 };
 
+enum mt76_dma_wed_ind_reason {
+    MT_DMA_WED_IND_REASON_NORMAL,
+    MT_DMA_WED_IND_REASON_REPEAT,
+    MT_DMA_WED_IND_REASON_OLDPKT,
+};
+
 int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
 void mt76_dma_attach(struct mt76_dev *dev);
 void mt76_dma_cleanup(struct mt76_dev *dev);
 int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
 
+static inline void
+mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
+{
+    if (!drop)
+        return;
+
+    *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
+    if (!(ctrl & MT_DMA_CTL_VER_MASK))
+        return;
+
+    switch (FIELD_GET(MT_DMA_WED_IND_REASON, buf1)) {
+    case MT_DMA_WED_IND_REASON_REPEAT:
+        *drop = true;
+        break;
+    case MT_DMA_WED_IND_REASON_OLDPKT:
+        *drop = !(info & MT_DMA_INFO_DMA_FRAG);
+        break;
+    default:
+        *drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
+        break;
+    }
+}
+
 #endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 8389b493759c..7aac973723bc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -31,13 +31,20 @@
 #define MT_QFLAG_WED_RING           GENMASK(1, 0)
 #define MT_QFLAG_WED_TYPE           GENMASK(4, 2)
 #define MT_QFLAG_WED                BIT(5)
+#define MT_QFLAG_WED_RRO            BIT(6)
+#define MT_QFLAG_WED_RRO_EN         BIT(7)
 
 #define __MT_WED_Q(_type, _n)       (MT_QFLAG_WED | \
                                      FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
                                      FIELD_PREP(MT_QFLAG_WED_RING, _n))
+#define __MT_WED_RRO_Q(_type, _n)   (MT_QFLAG_WED_RRO | __MT_WED_Q(_type, _n))
+
 #define MT_WED_Q_TX(_n)             __MT_WED_Q(MT76_WED_Q_TX, _n)
 #define MT_WED_Q_RX(_n)             __MT_WED_Q(MT76_WED_Q_RX, _n)
 #define MT_WED_Q_TXFREE             __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
+#define MT_WED_RRO_Q_DATA(_n)       __MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
+#define MT_WED_RRO_Q_MSDU_PG(_n)    __MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
+#define MT_WED_RRO_Q_IND            __MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
 
 struct mt76_dev;
 struct mt76_phy;
@@ -59,6 +66,9 @@ enum mt76_wed_type {
     MT76_WED_Q_TX,
     MT76_WED_Q_TXFREE,
     MT76_WED_Q_RX,
+    MT76_WED_RRO_Q_DATA,
+    MT76_WED_RRO_Q_MSDU_PG,
+    MT76_WED_RRO_Q_IND,
 };
 
 struct mt76_bus_ops {
@@ -194,6 +204,7 @@ struct mt76_queue {
     spinlock_t lock;
     spinlock_t cleanup_lock;
     struct mt76_queue_entry *entry;
+    struct mt76_rro_desc *rro_desc;
     struct mt76_desc *desc;
 
     u16 first;
@@ -207,7 +218,7 @@ struct mt76_queue {
 
     u8 buf_offset;
     u8 hw_idx;
-    u8 flags;
+    u16 flags;
 
     struct mtk_wed_device *wed;
     u32 wed_regs;
@@ -364,6 +375,17 @@ struct mt76_txq {
     bool aggr;
 };
 
+struct mt76_wed_rro_ind {
+    u32 se_id       : 12;
+    u32 rsv         : 4;
+    u32 start_sn    : 12;
+    u32 ind_reason  : 4;
+    u32 ind_cnt     : 13;
+    u32 win_sz      : 3;
+    u32 rsv2        : 13;
+    u32 magic_cnt   : 3;
+};
+
 struct mt76_txwi_cache {
     struct list_head list;
     dma_addr_t dma_addr;
@@ -1581,10 +1603,32 @@ static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
            FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
 }
 
+static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
+{
+    return q->flags & MT_QFLAG_WED_RRO;
+}
+
+static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
+{
+    return mt76_queue_is_wed_rro(q) &&
+           FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
+}
+
+static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
+{
+    return mt76_queue_is_wed_rro(q) &&
+           (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
+            FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
+}
+
 static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
 {
-    return (q->flags & MT_QFLAG_WED) &&
-           FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+    if (!(q->flags & MT_QFLAG_WED))
+        return false;
+
+    return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
+           mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q);
 }
 
 struct mt76_txwi_cache *
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index 72912f376bc9..2221d22ccffb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -65,6 +65,29 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
     RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
     RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
 
+    if (dev->has_rro) {
+        /* band0 */
+        RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
+                   MT7996_RXQ_RRO_BAND0);
+        RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
+                   MT7996_RXQ_MSDU_PG_BAND0);
+        RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
+                   MT7996_RXQ_TXFREE0);
+        /* band1 */
+        RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
+                   MT7996_RXQ_MSDU_PG_BAND1);
+        /* band2 */
+        RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
+                   MT7996_RXQ_RRO_BAND2);
+        RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
+                   MT7996_RXQ_MSDU_PG_BAND2);
+        RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
+                   MT7996_RXQ_TXFREE2);
+
+        RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
+                   MT7996_RXQ_RRO_IND);
+    }
+
     /* data tx queue */
     TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
     TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
@@ -93,6 +116,24 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
     mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x1a0, 0x10));
     mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x2a0, 0x10));
 
+    if (dev->has_rro) {
+        mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
+                PREFETCH(0x3a0, 0x10));
+        mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
+                PREFETCH(0x4a0, 0x10));
+        mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
+                PREFETCH(0x5a0, 0x4));
+        mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
+                PREFETCH(0x5e0, 0x4));
+        mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
+                PREFETCH(0x620, 0x4));
+        mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
+                PREFETCH(0x660, 0x4));
+        mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
+                PREFETCH(0x6a0, 0x4));
+    }
+#undef PREFETCH
+
     mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs,
              WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
 }
@@ -150,6 +191,7 @@ static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
 
 void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
 {
+    struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
     u32 hif1_ofs = 0;
     u32 irq_mask;
 
@@ -158,11 +200,16 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
 
     /* enable WFDMA Tx/Rx */
     if (!reset) {
-        mt76_set(dev, MT_WFDMA0_GLO_CFG,
-                 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
-                 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
-                 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
-                 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+        if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+            mt76_set(dev, MT_WFDMA0_GLO_CFG,
+                     MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+                     MT_WFDMA0_GLO_CFG_OMIT_TX_INFO);
+        else
+            mt76_set(dev, MT_WFDMA0_GLO_CFG,
+                     MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+                     MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+                     MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+                     MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
 
         if (dev->hif2)
             mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
@@ -184,12 +231,12 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
     if (dev->tbtc_support)
         irq_mask |= MT_INT_BAND2_RX_DONE;
 
-    if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) {
+    if (mtk_wed_device_active(wed) && wed_reset) {
         u32 wed_irq_mask = irq_mask;
 
         wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
         mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
-        mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
+        mtk_wed_device_start(wed, wed_irq_mask);
     }
 
     irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
@@ -266,13 +313,85 @@
         /* fix hardware limitation, pcie1's rx ring3 is not available
          * so, redirect pcie0 rx ring3 interrupt to pcie1
          */
-        mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
-                 MT_WFDMA0_RX_INT_SEL_RING3);
+        if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+            dev->has_rro)
+            mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
+                     MT_WFDMA0_RX_INT_SEL_RING6);
+        else
+            mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
+                     MT_WFDMA0_RX_INT_SEL_RING3);
     }
 
     mt7996_dma_start(dev, reset, true);
 }
 
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+int mt7996_dma_rro_init(struct mt7996_dev *dev)
+{
+    struct mt76_dev *mdev = &dev->mt76;
+    u32 irq_mask;
+    int ret;
+
+    /* ind cmd */
+    mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
+    mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
+    ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
+                           MT_RXQ_ID(MT_RXQ_RRO_IND),
+                           MT7996_RX_RING_SIZE,
+                           0, MT_RXQ_RRO_IND_RING_BASE);
+    if (ret)
+        return ret;
+
+    /* rx msdu page queue for band0 */
+    mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
+        MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
+    mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
+    ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
+                           MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
+                           MT7996_RX_RING_SIZE,
+                           MT7996_RX_MSDU_PAGE_SIZE,
+                           MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
+    if (ret)
+        return ret;
+
+    if (dev->dbdc_support) {
+        /* rx msdu page queue for band1 */
+        mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
+            MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
+        mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
+        ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
+                               MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
+                               MT7996_RX_RING_SIZE,
+                               MT7996_RX_MSDU_PAGE_SIZE,
+                               MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
+        if (ret)
+            return ret;
+    }
+
+    if (dev->tbtc_support) {
+        /* rx msdu page queue for band2 */
+        mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
+            MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
+        mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
+        ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
+                               MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
+                               MT7996_RX_RING_SIZE,
+                               MT7996_RX_MSDU_PAGE_SIZE,
+                               MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
+        if (ret)
+            return ret;
+    }
+
+    irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
+               MT_INT_TX_DONE_BAND2;
+    mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
+    mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
+    mt7996_irq_enable(dev, irq_mask);
+
+    return 0;
+}
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+
 int mt7996_dma_init(struct mt7996_dev *dev)
 {
     struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
@@ -356,7 +475,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
         return ret;
 
     /* tx free notify event from WA for band0 */
-    if (mtk_wed_device_active(wed)) {
+    if (mtk_wed_device_active(wed) && !dev->has_rro) {
         dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
         dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
     }
@@ -372,9 +491,6 @@ int mt7996_dma_init(struct mt7996_dev *dev)
     if (dev->tbtc_support || dev->mphy.band_idx == MT_BAND2) {
         /* rx data queue for band2 */
         rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
-        if (mtk_wed_device_active(wed))
-            rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2);
-
         ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
                                MT_RXQ_ID(MT_RXQ_BAND2),
                                MT7996_RX_RING_SIZE,
@@ -386,7 +502,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
         /* tx free notify event from WA for band2
          * use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
          */
-        if (mtk_wed_device_active(wed_hif2)) {
+        if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
             dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
             dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
         }
@@ -400,6 +516,60 @@ int mt7996_dma_init(struct mt7996_dev *dev)
             return ret;
     }
 
+    if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
+        dev->has_rro) {
+        /* rx rro data queue for band0 */
+        dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
+            MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
+        dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
+        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
+                               MT_RXQ_ID(MT_RXQ_RRO_BAND0),
+                               MT7996_RX_RING_SIZE,
+                               MT7996_RX_BUF_SIZE,
+                               MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
+        if (ret)
+            return ret;
+
+        /* tx free notify event from WA for band0 */
+        dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+        dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+
+        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+                               MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+                               MT7996_RX_MCU_RING_SIZE,
+                               MT7996_RX_BUF_SIZE,
+                               MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+        if (ret)
+            return ret;
+
+        if (dev->tbtc_support || dev->mphy.band_idx == MT_BAND2) {
+            /* rx rro data queue for band2 */
+            dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
+                MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
+            dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
+            ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
+                                   MT_RXQ_ID(MT_RXQ_RRO_BAND2),
+                                   MT7996_RX_RING_SIZE,
+                                   MT7996_RX_BUF_SIZE,
+                                   MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
+            if (ret)
+                return ret;
+
+            /* tx free notify event from MAC for band2 */
+            if (mtk_wed_device_active(wed_hif2)) {
+                dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
+                dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
+            }
+            ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
+                                   MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
+                                   MT7996_RX_MCU_RING_SIZE,
+                                   MT7996_RX_BUF_SIZE,
+                                   MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
+            if (ret)
+                return ret;
+        }
+    }
+
     ret = mt76_init_queues(dev, mt76_dma_rx_poll);
     if (ret < 0)
         return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index e19c8fb71609..a1adbc65ae00 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -320,8 +320,17 @@ void mt7996_mac_init(struct mt7996_dev *dev)
 
     /* rro module init */
     mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
-    mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
-    mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
+    if (dev->has_rro) {
+        u16 timeout;
+
+        timeout = mt76_rr(dev, MT_HW_REV) == MT_HW_REV1 ? 512 : 128;
+        mt7996_mcu_set_rro(dev, UNI_RRO_SET_FLUSH_TIMEOUT, timeout);
+        mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 1);
+        mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 0);
+    } else {
+        mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
+        mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
+    }
 
     mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
                       MCU_WA_PARAM_HW_PATH_HIF_VER,
@@ -475,6 +484,163 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
     msleep(20);
 }
 
+static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+    struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+    u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
+    struct mt7996_wed_rro_addr *addr;
+    void *ptr;
+    int i;
+
+    if (!dev->has_rro)
+        return 0;
+
+    if (!mtk_wed_device_active(wed))
+        return 0;
+
+    for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
+        ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+                                  MT7996_RRO_BA_BITMAP_CR_SIZE,
+                                  &dev->wed_rro.ba_bitmap[i].phy_addr,
+                                  GFP_KERNEL);
+        if (!ptr)
+            return -ENOMEM;
+
+        dev->wed_rro.ba_bitmap[i].ptr = ptr;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
+        int j;
+
+        ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+                                  MT7996_RRO_WINDOW_MAX_SIZE * sizeof(*addr),
+                                  &dev->wed_rro.addr_elem[i].phy_addr,
+                                  GFP_KERNEL);
+        if (!ptr)
+            return -ENOMEM;
+
+        dev->wed_rro.addr_elem[i].ptr = ptr;
+        memset(dev->wed_rro.addr_elem[i].ptr, 0,
+               MT7996_RRO_WINDOW_MAX_SIZE * sizeof(*addr));
+
+        addr = dev->wed_rro.addr_elem[i].ptr;
+        for (j = 0; j < MT7996_RRO_WINDOW_MAX_SIZE; j++) {
+            addr->signature = 0xff;
+            addr++;
+        }
+
+        wed->wlan.ind_cmd.addr_elem_phys[i] =
+            dev->wed_rro.addr_elem[i].phy_addr;
+    }
+
+    ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+                              MT7996_RRO_WINDOW_MAX_LEN * sizeof(*addr),
+                              &dev->wed_rro.session.phy_addr,
+                              GFP_KERNEL);
+    if (!ptr)
+        return -ENOMEM;
+
+    dev->wed_rro.session.ptr = ptr;
+    addr = dev->wed_rro.session.ptr;
+    for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) {
+        addr->signature = 0xff;
+        addr++;
+    }
+
+    /* rro hw init */
+    /* TODO: remove line after WM has set */
+    mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
+
+    /* setup BA bitmap cache address */
+    mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
+            dev->wed_rro.ba_bitmap[0].phy_addr);
+    mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
+    mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
+            dev->wed_rro.ba_bitmap[1].phy_addr);
+    mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
+
+    /* setup Address element address */
+    for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
+        mt76_wr(dev, reg, dev->wed_rro.addr_elem[i].phy_addr >> 4);
+        reg += 4;
+    }
+
+    /* setup Address element address - separate address segment mode */
+    mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
+            MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
+
+    wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
+    wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
+    wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
+    wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
+    wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
+
+    mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
+    mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
+             MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
+
+    /* particular session configure */
+    /* use max session idx + 1 as particular session id */
+    mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
+    mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
+            MT_RRO_PARTICULAR_CONFG_EN |
+            FIELD_PREP(MT_RRO_PARTICULAR_SID, MT7996_RRO_MAX_SESSION));
+
+    /* interrupt enable */
+    mt76_wr(dev, MT_RRO_HOST_INT_ENA,
+            MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
+
+    /* rro ind cmd queue init */
+    return mt7996_dma_rro_init(dev);
+#else
+    return 0;
+#endif
+}
+
+static void mt7996_wed_rro_free(struct mt7996_dev *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+    int i;
+
+    if (!dev->has_rro)
+        return;
+
+    if (!mtk_wed_device_active(&dev->mt76.mmio.wed))
+        return;
+
+    for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
+        if (!dev->wed_rro.ba_bitmap[i].ptr)
+            continue;
+
+        dmam_free_coherent(dev->mt76.dma_dev,
+                           MT7996_RRO_BA_BITMAP_CR_SIZE,
+                           dev->wed_rro.ba_bitmap[i].ptr,
+                           dev->wed_rro.ba_bitmap[i].phy_addr);
+    }
+
+    for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
+        if (!dev->wed_rro.addr_elem[i].ptr)
+            continue;
+
+        dmam_free_coherent(dev->mt76.dma_dev,
+                           MT7996_RRO_WINDOW_MAX_SIZE *
+                           sizeof(struct mt7996_wed_rro_addr),
+                           dev->wed_rro.addr_elem[i].ptr,
+                           dev->wed_rro.addr_elem[i].phy_addr);
