Diffstat (limited to 'drivers/net/wireless/ath/ath11k')
21 files changed, 1415 insertions, 349 deletions
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 920abce9053a..5cbba9a8b6ba 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -874,11 +874,11 @@ static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab) ab->pci.msi.ep_base_data = int_prop + 32; for (i = 0; i < ab->pci.msi.config->total_vectors; i++) { - res = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (!res) - return -ENODEV; + ret = platform_get_irq(pdev, i); + if (ret < 0) + return ret; - ab->pci.msi.irqs[i] = res->start; + ab->pci.msi.irqs[i] = ret; } set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags); @@ -1078,6 +1078,12 @@ static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab) struct iommu_domain *iommu; size_t unmapped_size; + /* Chipsets not requiring MSA would have not initialized + * MSA resources, return success in such cases. + */ + if (!ab->hw_params.fixed_fw_mem) + return 0; + if (ab_ahb->fw.use_tz) return 0; @@ -1174,7 +1180,7 @@ static int ath11k_ahb_probe(struct platform_device *pdev) * to a new space for accessing them. */ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size); - if (IS_ERR(ab->mem_ce)) { + if (!ab->mem_ce) { dev_err(&pdev->dev, "ce ioremap error\n"); ret = -ENOMEM; goto err_core_free; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 75fdbe4ef83a..b1b90bd34d67 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -116,7 +116,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, - .ftm_responder = true, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -199,7 +198,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, - .ftm_responder = true, }, { .name = "qca6390 hw2.0", @@ -284,7 +282,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, - .ftm_responder = false, }, { .name = "qcn9074 hw1.0", @@ -366,7 +363,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, - .ftm_responder = true, }, { .name = "wcn6855 hw2.0", @@ -451,7 +447,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, - .ftm_responder = false, }, { .name = "wcn6855 hw2.1", @@ -534,7 +529,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, - .ftm_responder = false, }, { .name = "wcn6750 hw1.0", @@ -599,7 +593,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .current_cc_support = true, .dbr_debug_support = false, .global_reset = false, - .bios_sar_capa = NULL, + .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support = false, .fixed_bdf_addr = false, .fixed_mem_region = false, @@ -615,7 +609,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750, .smp2p_wow_exit = true, .support_fw_mac_sequence = true, - .ftm_responder = false, }, { .hw_rev = ATH11K_HW_IPQ5018_HW10, @@ -695,7 +688,6 @@ static const struct ath11k_hw_params 
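/*
 * Illustrative sketch (not the driver's actual code) of the two ahb.c fixes
 * above: platform_get_irq() replaces platform_get_resource(IORESOURCE_IRQ),
 * and ioremap() failure is detected with a NULL check rather than IS_ERR(),
 * since ioremap() returns NULL on error. demo_setup_irqs() and demo_map_ce()
 * are hypothetical names.
 */
#include <linux/platform_device.h>
#include <linux/io.h>

static int demo_setup_irqs(struct platform_device *pdev, int *irqs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = platform_get_irq(pdev, i);	/* virq number or -errno */
		if (ret < 0)
			return ret;
		irqs[i] = ret;
	}
	return 0;
}

static void __iomem *demo_map_ce(phys_addr_t base, size_t size)
{
	void __iomem *mem = ioremap(base, size);

	if (!mem)	/* ioremap() returns NULL on failure, not an ERR_PTR */
		return NULL;
	return mem;
}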
ath11k_hw_params[] = { .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, - .ftm_responder = true, }, }; diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c index 2107ec05d14f..5536e8642331 100644 --- a/drivers/net/wireless/ath/ath11k/dbring.c +++ b/drivers/net/wireless/ath/ath11k/dbring.c @@ -26,13 +26,13 @@ int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size) static void ath11k_dbring_fill_magic_value(struct ath11k *ar, void *buffer, u32 size) { - u32 *temp; - int idx; - - size = size >> 2; + /* memset32 function fills buffer payload with the ATH11K_DB_MAGIC_VALUE + * and the variable size is expected to be the number of u32 values + * to be stored, not the number of bytes. + */ + size = size / sizeof(u32); - for (idx = 0, temp = buffer; idx < size; idx++, temp++) - *temp++ = ATH11K_DB_MAGIC_VALUE; + memset32(buffer, ATH11K_DB_MAGIC_VALUE, size); } static int ath11k_dbring_bufs_replenish(struct ath11k *ar, diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h index 2b97cbbd28cb..0bbd58a380de 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h +++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h @@ -143,7 +143,8 @@ enum htt_tx_pdev_underrun_enum { /* Bytes stored in little endian order */ /* Length should be multiple of DWORD */ struct htt_stats_string_tlv { - u32 data[0]; /* Can be variable length */ + /* Can be variable length */ + DECLARE_FLEX_ARRAY(u32, data); } __packed; #define HTT_STATS_MAC_ID GENMASK(7, 0) @@ -205,27 +206,32 @@ struct htt_tx_pdev_stats_cmn_tlv { /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_pdev_stats_urrn_tlv_v { - u32 urrn_stats[0]; /* HTT_TX_PDEV_MAX_URRN_STATS */ + /* HTT_TX_PDEV_MAX_URRN_STATS */ + DECLARE_FLEX_ARRAY(u32, urrn_stats); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_pdev_stats_flush_tlv_v { - u32 flush_errs[0]; /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */ + /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */ + DECLARE_FLEX_ARRAY(u32, flush_errs); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_pdev_stats_sifs_tlv_v { - u32 sifs_status[0]; /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */ + /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */ + DECLARE_FLEX_ARRAY(u32, sifs_status); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_pdev_stats_phy_err_tlv_v { - u32 phy_errs[0]; /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */ + /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */ + DECLARE_FLEX_ARRAY(u32, phy_errs); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_pdev_stats_sifs_hist_tlv_v { - u32 sifs_hist_status[0]; /* HTT_TX_PDEV_SIFS_BURST_HIST_STATS */ + /* HTT_TX_PDEV_SIFS_BURST_HIST_STATS */ + DECLARE_FLEX_ARRAY(u32, sifs_hist_status); }; struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v { @@ -590,20 +596,20 @@ struct htt_tx_hwq_difs_latency_stats_tlv_v { /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_hwq_cmd_result_stats_tlv_v { - /* Histogram of sched cmd result */ - u32 cmd_result[0]; /* HTT_TX_HWQ_MAX_CMD_RESULT_STATS */ + /* Histogram of sched cmd result, HTT_TX_HWQ_MAX_CMD_RESULT_STATS */ + DECLARE_FLEX_ARRAY(u32, cmd_result); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_hwq_cmd_stall_stats_tlv_v { - /* Histogram of various pause conitions */ - u32 
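/*
 * Minimal sketch of the dbring.c change above: an open-coded u32 fill loop is
 * replaced with memset32(), whose third argument is a count of 32-bit words,
 * not bytes. fill_magic() and DEMO_MAGIC are hypothetical names.
 */
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_MAGIC 0xdeadbeef

static void fill_magic(void *buf, u32 size_bytes)
{
	/* memset32() takes the number of u32 elements to store */
	memset32(buf, DEMO_MAGIC, size_bytes / sizeof(u32));
}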
cmd_stall_status[0]; /* HTT_TX_HWQ_MAX_CMD_STALL_STATS */ + /* Histogram of various pause conitions, HTT_TX_HWQ_MAX_CMD_STALL_STATS */ + DECLARE_FLEX_ARRAY(u32, cmd_stall_status); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_hwq_fes_result_stats_tlv_v { - /* Histogram of number of user fes result */ - u32 fes_result[0]; /* HTT_TX_HWQ_MAX_FES_RESULT_STATS */ + /* Histogram of number of user fes result, HTT_TX_HWQ_MAX_FES_RESULT_STATS */ + DECLARE_FLEX_ARRAY(u32, fes_result); }; /* NOTE: Variable length TLV, use length spec to infer array size @@ -635,8 +641,8 @@ struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v { * #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 ( 1 ms ) */ struct htt_tx_hwq_txop_used_cnt_hist_tlv_v { - /* Histogram of txop used cnt */ - u32 txop_used_cnt_hist[0]; /* HTT_TX_HWQ_TXOP_USED_CNT_HIST */ + /* Histogram of txop used cnt, HTT_TX_HWQ_TXOP_USED_CNT_HIST */ + DECLARE_FLEX_ARRAY(u32, txop_used_cnt_hist); }; /* == TX SELFGEN STATS == */ @@ -804,17 +810,20 @@ struct htt_tx_pdev_mpdu_stats_tlv { /* == TX SCHED STATS == */ /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_sched_txq_cmd_posted_tlv_v { - u32 sched_cmd_posted[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */ + /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */ + DECLARE_FLEX_ARRAY(u32, sched_cmd_posted); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_sched_txq_cmd_reaped_tlv_v { - u32 sched_cmd_reaped[0]; /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */ + /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */ + DECLARE_FLEX_ARRAY(u32, sched_cmd_reaped); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_sched_txq_sched_order_su_tlv_v { - u32 sched_order_su[0]; /* HTT_TX_PDEV_NUM_SCHED_ORDER_LOG */ + /* HTT_TX_PDEV_NUM_SCHED_ORDER_LOG */ + DECLARE_FLEX_ARRAY(u32, sched_order_su); }; enum htt_sched_txq_sched_ineligibility_tlv_enum { @@ -842,7 +851,7 @@ enum htt_sched_txq_sched_ineligibility_tlv_enum { /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_sched_txq_sched_ineligibility_tlv_v { /* indexed by htt_sched_txq_sched_ineligibility_tlv_enum */ - u32 sched_ineligibility[0]; + DECLARE_FLEX_ARRAY(u32, sched_ineligibility); }; #define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0) @@ -888,18 +897,20 @@ struct htt_stats_tx_sched_cmn_tlv { /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_tqm_gen_mpdu_stats_tlv_v { - u32 gen_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_GEN_MPDU_END_REASON */ + /* HTT_TX_TQM_MAX_GEN_MPDU_END_REASON */ + DECLARE_FLEX_ARRAY(u32, gen_mpdu_end_reason); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_tqm_list_mpdu_stats_tlv_v { - u32 list_mpdu_end_reason[0]; /* HTT_TX_TQM_MAX_LIST_MPDU_END_REASON */ + /* HTT_TX_TQM_MAX_LIST_MPDU_END_REASON */ + DECLARE_FLEX_ARRAY(u32, list_mpdu_end_reason); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_tx_tqm_list_mpdu_cnt_tlv_v { - u32 list_mpdu_cnt_hist[0]; - /* HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS */ + /* HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS */ + DECLARE_FLEX_ARRAY(u32, list_mpdu_cnt_hist); }; struct htt_tx_tqm_pdev_stats_tlv_v { @@ -1098,7 +1109,7 @@ struct htt_tx_de_compl_stats_tlv { * ENTRIES_PER_BIN_COUNT) */ struct htt_tx_de_fw2wbm_ring_full_hist_tlv { - u32 fw2wbm_ring_full_hist[0]; + DECLARE_FLEX_ARRAY(u32, fw2wbm_ring_full_hist); }; struct htt_tx_de_cmn_stats_tlv { @@ -1151,7 +1162,7 @@ struct htt_ring_if_cmn_tlv { /* 
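/*
 * Sketch of the DECLARE_FLEX_ARRAY() conversions above. A zero-length array
 * that is the sole member of a struct is replaced with DECLARE_FLEX_ARRAY()
 * so the compiler and array-bounds checkers see a proper flexible array
 * member. demo_tlv and demo_alloc_tlv() are hypothetical; the real HTT TLVs
 * are also __packed.
 */
#include <linux/stddef.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_tlv {
	/* was: u32 data[0]; */
	DECLARE_FLEX_ARRAY(u32, data);
};

static struct demo_tlv *demo_alloc_tlv(size_t n_words)
{
	struct demo_tlv *tlv;

	/* struct_size() guards the flexible-array size math against overflow */
	tlv = kzalloc(struct_size(tlv, data, n_words), GFP_KERNEL);
	return tlv;
}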
NOTE: Variable length TLV, use length spec to infer array size */ struct htt_sfm_client_user_tlv_v { /* Number of DWORDS used per user and per client */ - u32 dwords_used_by_user_n[0]; + DECLARE_FLEX_ARRAY(u32, dwords_used_by_user_n); }; struct htt_sfm_client_tlv { @@ -1436,12 +1447,14 @@ struct htt_rx_soc_fw_stats_tlv { /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_rx_soc_fw_refill_ring_empty_tlv_v { - u32 refill_ring_empty_cnt[0]; /* HTT_RX_STATS_REFILL_MAX_RING */ + /* HTT_RX_STATS_REFILL_MAX_RING */ + DECLARE_FLEX_ARRAY(u32, refill_ring_empty_cnt); }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v { - u32 refill_ring_num_refill[0]; /* HTT_RX_STATS_REFILL_MAX_RING */ + /* HTT_RX_STATS_REFILL_MAX_RING */ + DECLARE_FLEX_ARRAY(u32, refill_ring_num_refill); }; /* RXDMA error code from WBM released packets */ @@ -1473,7 +1486,7 @@ enum htt_rx_rxdma_error_code_enum { /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v { - u32 rxdma_err[0]; /* HTT_RX_RXDMA_MAX_ERR_CODE */ + DECLARE_FLEX_ARRAY(u32, rxdma_err); /* HTT_RX_RXDMA_MAX_ERR_CODE */ }; /* REO error code from WBM released packets */ @@ -1505,7 +1518,7 @@ enum htt_rx_reo_error_code_enum { /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v { - u32 reo_err[0]; /* HTT_RX_REO_MAX_ERR_CODE */ + DECLARE_FLEX_ARRAY(u32, reo_err); /* HTT_RX_REO_MAX_ERR_CODE */ }; /* == RX PDEV STATS == */ @@ -1622,13 +1635,13 @@ struct htt_rx_pdev_fw_stats_phy_err_tlv { /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v { /* Num error MPDU for each RxDMA error type */ - u32 fw_ring_mpdu_err[0]; /* HTT_RX_STATS_RXDMA_MAX_ERR */ + DECLARE_FLEX_ARRAY(u32, fw_ring_mpdu_err); /* HTT_RX_STATS_RXDMA_MAX_ERR */ }; /* NOTE: Variable length TLV, use length spec to infer array size */ struct htt_rx_pdev_fw_mpdu_drop_tlv_v { /* Num MPDU dropped */ - u32 fw_mpdu_drop[0]; /* HTT_RX_STATS_FW_DROP_REASON_MAX */ + DECLARE_FLEX_ARRAY(u32, fw_mpdu_drop); /* HTT_RX_STATS_FW_DROP_REASON_MAX */ }; #define HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT (0x1) diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index f5156a7fbdd7..d070bcb3fe24 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -36,6 +36,7 @@ void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr) } ath11k_peer_rx_tid_cleanup(ar, peer); + peer->dp_setup_done = false; crypto_free_shash(peer->tfm_mmic); spin_unlock_bh(&ab->base_lock); } @@ -72,7 +73,8 @@ int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr) ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id); if (ret) { ath11k_warn(ab, "failed to setup rx defrag context\n"); - return ret; + tid--; + goto peer_clean; } /* TODO: Setup other peer specific resource used in data path */ diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index be9eafc872b3..d04f78ab6b37 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -214,7 +214,7 @@ struct ath11k_pdev_dp { #define DP_REO_REINJECT_RING_SIZE 32 #define DP_RX_RELEASE_RING_SIZE 1024 #define DP_REO_EXCEPTION_RING_SIZE 128 -#define DP_REO_CMD_RING_SIZE 128 +#define DP_REO_CMD_RING_SIZE 256 #define DP_REO_STATUS_RING_SIZE 2048 #define 
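/*
 * Sketch of the error-unwind idea behind the dp.c peer-setup change above: if
 * a later step fails, the loop index is walked back so only the TIDs that
 * were actually initialized get torn down, instead of returning and leaking
 * them. demo_setup_tids(), setup_tid() and teardown_tid() are hypothetical
 * stand-ins (setup_tid() here fails arbitrarily just to exercise the path).
 */
#include <linux/errno.h>

static int setup_tid(int tid) { return tid == 3 ? -ENOMEM : 0; }
static void teardown_tid(int tid) { }

static int demo_setup_tids(int n_tids)
{
	int tid, ret;

	for (tid = 0; tid < n_tids; tid++) {
		ret = setup_tid(tid);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	for (tid--; tid >= 0; tid--)
		teardown_tid(tid);
	return ret;
}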
DP_RXDMA_BUF_RING_SIZE 4096 #define DP_RXDMA_REFILL_RING_SIZE 2048 @@ -303,12 +303,16 @@ struct ath11k_dp { #define HTT_TX_WBM_COMP_STATUS_OFFSET 8 +#define HTT_INVALID_PEER_ID 0xffff + /* HTT tx completion is overlaid in wbm_release_ring */ #define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(12, 9) #define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13) #define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13) #define HTT_TX_WBM_COMP_INFO1_ACK_RSSI GENMASK(31, 24) +#define HTT_TX_WBM_COMP_INFO2_SW_PEER_ID GENMASK(15, 0) +#define HTT_TX_WBM_COMP_INFO2_VALID BIT(21) struct htt_tx_wbm_completion { u32 info0; diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index b65a84a88264..f67ce62b2b48 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -389,10 +389,10 @@ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, goto fail_free_skb; spin_lock_bh(&rx_ring->idr_lock); - buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, - rx_ring->bufs_max * 3, GFP_ATOMIC); + buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1, + (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC); spin_unlock_bh(&rx_ring->idr_lock); - if (buf_id < 0) + if (buf_id <= 0) goto fail_dma_unmap; desc = ath11k_hal_srng_src_get_next_entry(ab, srng); @@ -435,7 +435,6 @@ fail_free_skb: static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, struct dp_rxdma_ring *rx_ring) { - struct ath11k_pdev_dp *dp = &ar->dp; struct sk_buff *skb; int buf_id; @@ -453,28 +452,6 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, idr_destroy(&rx_ring->bufs_idr); spin_unlock_bh(&rx_ring->idr_lock); - /* if rxdma1_enable is false, mon_status_refill_ring - * isn't setup, so don't clean. - */ - if (!ar->ab->hw_params.rxdma1_enable) - return 0; - - rx_ring = &dp->rx_mon_status_refill_ring[0]; - - spin_lock_bh(&rx_ring->idr_lock); - idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { - idr_remove(&rx_ring->bufs_idr, buf_id); - /* XXX: Understand where internal driver does this dma_unmap - * of rxdma_buffer. 
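/*
 * Sketch of the buf_id handling above: idr_alloc() is asked to start at 1 so
 * that a buffer id of 0 can be treated as invalid when it comes back as a
 * hardware cookie, and the failure check becomes <= 0 accordingly.
 * demo_store_skb() is a hypothetical helper.
 */
#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int demo_store_skb(struct idr *bufs, void *skb, int max_bufs)
{
	int buf_id;

	/* idr_alloc() range is [start, end): ids 1 .. max_bufs * 3 */
	buf_id = idr_alloc(bufs, skb, 1, (max_bufs * 3) + 1, GFP_ATOMIC);
	if (buf_id <= 0)
		return -ENOBUFS;

	return buf_id;
}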
- */ - dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, - skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL); - dev_kfree_skb_any(skb); - } - - idr_destroy(&rx_ring->bufs_idr); - spin_unlock_bh(&rx_ring->idr_lock); - return 0; } @@ -691,13 +668,18 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) struct ath11k_dp *dp = &ab->dp; struct dp_reo_cmd *cmd, *tmp; struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; + struct dp_rx_tid *rx_tid; spin_lock_bh(&dp->reo_cmd_lock); list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { list_del(&cmd->list); - dma_unmap_single(ab->dev, cmd->data.paddr, - cmd->data.size, DMA_BIDIRECTIONAL); - kfree(cmd->data.vaddr); + rx_tid = &cmd->data; + if (rx_tid->vaddr) { + dma_unmap_single(ab->dev, rx_tid->paddr, + rx_tid->size, DMA_BIDIRECTIONAL); + kfree(rx_tid->vaddr); + rx_tid->vaddr = NULL; + } kfree(cmd); } @@ -705,9 +687,13 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) &dp->reo_cmd_cache_flush_list, list) { list_del(&cmd_cache->list); dp->reo_cmd_cache_flush_count--; - dma_unmap_single(ab->dev, cmd_cache->data.paddr, - cmd_cache->data.size, DMA_BIDIRECTIONAL); - kfree(cmd_cache->data.vaddr); + rx_tid = &cmd_cache->data; + if (rx_tid->vaddr) { + dma_unmap_single(ab->dev, rx_tid->paddr, + rx_tid->size, DMA_BIDIRECTIONAL); + kfree(rx_tid->vaddr); + rx_tid->vaddr = NULL; + } kfree(cmd_cache); } spin_unlock_bh(&dp->reo_cmd_lock); @@ -721,10 +707,12 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, if (status != HAL_REO_CMD_SUCCESS) ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", rx_tid->tid, status); - - dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); + if (rx_tid->vaddr) { + dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, + DMA_BIDIRECTIONAL); + kfree(rx_tid->vaddr); + rx_tid->vaddr = NULL; + } } static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, @@ -763,6 +751,7 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, DMA_BIDIRECTIONAL); kfree(rx_tid->vaddr); + rx_tid->vaddr = NULL; } } @@ -815,6 +804,7 @@ free_desc: dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, DMA_BIDIRECTIONAL); kfree(rx_tid->vaddr); + rx_tid->vaddr = NULL; } void ath11k_peer_rx_tid_delete(struct ath11k *ar, @@ -827,6 +817,8 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar, if (!rx_tid->active) return; + rx_tid->active = false; + cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; cmd.addr_lo = lower_32_bits(rx_tid->paddr); cmd.addr_hi = upper_32_bits(rx_tid->paddr); @@ -841,9 +833,11 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar, dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, DMA_BIDIRECTIONAL); kfree(rx_tid->vaddr); + rx_tid->vaddr = NULL; } - rx_tid->active = false; + rx_tid->paddr = 0; + rx_tid->size = 0; } static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, @@ -990,6 +984,7 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, DMA_BIDIRECTIONAL); kfree(rx_tid->vaddr); + rx_tid->vaddr = NULL; rx_tid->active = false; @@ -1014,7 +1009,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, peer = ath11k_peer_find(ab, vdev_id, peer_mac); if (!peer) { - ath11k_warn(ab, "failed to find the peer to set up rx tid\n"); + ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n", + peer_mac); spin_unlock_bh(&ab->base_lock); return 
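/*
 * Sketch of the double-free hardening above: the unmap/free of the REO queue
 * descriptor is guarded by the vaddr pointer, which is cleared after freeing,
 * so the several paths that may release the same rx_tid cannot unmap or free
 * it twice. demo_rx_tid and demo_release_tid() are hypothetical stand-ins for
 * the driver's dp_rx_tid handling.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct demo_rx_tid {
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
};

static void demo_release_tid(struct device *dev, struct demo_rx_tid *tid)
{
	if (!tid->vaddr)	/* already released on another path */
		return;

	dma_unmap_single(dev, tid->paddr, tid->size, DMA_BIDIRECTIONAL);
	kfree(tid->vaddr);
	tid->vaddr = NULL;	/* make repeat calls harmless */
}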
-ENOENT; } @@ -1027,7 +1023,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, ba_win_sz, ssn, true); spin_unlock_bh(&ab->base_lock); if (ret) { - ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid); + ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d", + peer_mac, tid, ret); return ret; } @@ -1035,8 +1032,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, peer_mac, paddr, tid, 1, ba_win_sz); if (ret) - ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n", - tid, ret); + ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n", + peer_mac, tid, ret); return ret; } @@ -1069,6 +1066,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, ret = dma_mapping_error(ab->dev, paddr); if (ret) { spin_unlock_bh(&ab->base_lock); + ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n", + peer_mac, tid, ret); goto err_mem_free; } @@ -1082,15 +1081,16 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, paddr, tid, 1, ba_win_sz); if (ret) { - ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n", - tid, ret); + ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n", + peer_mac, tid, ret); ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); } return ret; err_mem_free: - kfree(vaddr); + kfree(rx_tid->vaddr); + rx_tid->vaddr = NULL; return ret; } @@ -2665,6 +2665,9 @@ try_again: cookie); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); + if (unlikely(buf_id == 0)) + continue; + ar = ab->pdevs[mac_id].ar; rx_ring = &ar->dp.rx_refill_buf_ring; spin_lock_bh(&rx_ring->idr_lock); @@ -3029,39 +3032,51 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, spin_lock_bh(&rx_ring->idr_lock); skb = idr_find(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + if (!skb) { ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", buf_id); - spin_unlock_bh(&rx_ring->idr_lock); pmon->buf_state = DP_MON_STATUS_REPLINISH; goto move_next; } - idr_remove(&rx_ring->bufs_idr, buf_id); - spin_unlock_bh(&rx_ring->idr_lock); - rxcb = ATH11K_SKB_RXCB(skb); - dma_unmap_single(ab->dev, rxcb->paddr, - skb->len + skb_tailroom(skb), - DMA_FROM_DEVICE); + dma_sync_single_for_cpu(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); tlv = (struct hal_tlv_hdr *)skb->data; if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE) { - ath11k_warn(ab, "mon status DONE not set %lx\n", + ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n", FIELD_GET(HAL_TLV_HDR_TAG, - tlv->tl)); - dev_kfree_skb_any(skb); + tlv->tl), buf_id); + /* If done status is missing, hold onto status + * ring until status is done for this status + * ring buffer. + * Keep HP in mon_status_ring unchanged, + * and break from here. 
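/*
 * Sketch of the diagnostic improvements above: the rx-tid warnings now
 * identify the peer by MAC address via the %pM printk specifier and append
 * the error code, so a failure can be tied to a specific station.
 * demo_warn_peer() is a hypothetical wrapper around the generic dev_warn().
 */
#include <linux/device.h>
#include <linux/types.h>

static void demo_warn_peer(struct device *dev, const u8 *peer_mac,
			   int tid, int err)
{
	dev_warn(dev, "failed to set up rx tid %d for peer %pM: %d\n",
		 tid, peer_mac, err);
}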
+ * Check status for same buffer for next time + */ pmon->buf_state = DP_MON_STATUS_NO_DMA; - goto move_next; + break; } + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); if (ab->hw_params.full_monitor_mode) { ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv); if (paddr == pmon->mon_status_paddr) pmon->buf_state = DP_MON_STATUS_MATCH; } + + dma_unmap_single(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + __skb_queue_tail(skb_list, skb); } else { pmon->buf_state = DP_MON_STATUS_REPLINISH; @@ -3117,8 +3132,11 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id int i; tfm = crypto_alloc_shash("michael_mic", 0, 0); - if (IS_ERR(tfm)) + if (IS_ERR(tfm)) { + ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n", + PTR_ERR(tfm)); return PTR_ERR(tfm); + } spin_lock_bh(&ab->base_lock); @@ -3138,6 +3156,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id } peer->tfm_mmic = tfm; + peer->dp_setup_done = true; spin_unlock_bh(&ab->base_lock); return 0; @@ -3583,6 +3602,13 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, ret = -ENOENT; goto out_unlock; } + if (!peer->dp_setup_done) { + ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", + peer->addr, peer_id); + ret = -ENOENT; + goto out_unlock; + } + rx_tid = &peer->rx_tid[tid]; if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || @@ -3598,7 +3624,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, goto out_unlock; } - if (frag_no > __fls(rx_tid->rx_frag_bitmap)) + if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap))) __skb_queue_tail(&rx_tid->rx_frags, msdu); else ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu); diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 8afbba236935..08a28464eb7a 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -316,10 +316,12 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, struct dp_tx_ring *tx_ring, struct ath11k_dp_htt_wbm_tx_status *ts) { + struct ieee80211_tx_status status = { 0 }; struct sk_buff *msdu; struct ieee80211_tx_info *info; struct ath11k_skb_cb *skb_cb; struct ath11k *ar; + struct ath11k_peer *peer; spin_lock(&tx_ring->tx_idr_lock); msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id); @@ -341,6 +343,11 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); + if (!skb_cb->vif) { + dev_kfree_skb_any(msdu); + return; + } + memset(&info->status, 0, sizeof(info->status)); if (ts->acked) { @@ -355,7 +362,23 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, } } - ieee80211_tx_status(ar->hw, msdu); + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find_by_id(ab, ts->peer_id); + if (!peer || !peer->sta) { + ath11k_dbg(ab, ATH11K_DBG_DATA, + "dp_tx: failed to find the peer with peer_id %d\n", + ts->peer_id); + spin_unlock_bh(&ab->base_lock); + dev_kfree_skb_any(msdu); + return; + } + spin_unlock_bh(&ab->base_lock); + + status.sta = peer->sta; + status.info = info; + status.skb = msdu; + + ieee80211_tx_status_ext(ar->hw, &status); } static void @@ -379,7 +402,15 @@ ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab, ts.msdu_id = msdu_id; ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI, status_desc->info1); + + if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, 
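/*
 * Sketch of the monitor-status handling above: the status buffer is first
 * made CPU-visible with dma_sync_single_for_cpu() so its TLV tag can be
 * inspected; only once the "buffer done" tag is seen is it unmapped and
 * consumed, otherwise it stays mapped and is re-checked later.
 * demo_peek_done() and DEMO_TAG_DONE are hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/types.h>

#define DEMO_TAG_DONE 0x11	/* hypothetical "status buffer done" tag */

static bool demo_peek_done(struct device *dev, struct sk_buff *skb,
			   dma_addr_t paddr)
{
	u32 tag;

	/* hand the buffer to the CPU just to look at the first word */
	dma_sync_single_for_cpu(dev, paddr, skb->len + skb_tailroom(skb),
				DMA_FROM_DEVICE);

	tag = *(u32 *)skb->data;
	if (tag != DEMO_TAG_DONE)
		return false;	/* not done yet: keep the buffer mapped */

	/* done: now it is safe to unmap and consume the skb */
	dma_unmap_single(dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
	return true;
}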
status_desc->info2)) + ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID, + status_desc->info2); + else + ts.peer_id = HTT_INVALID_PEER_ID; + ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts); + break; case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ: case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT: |
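The dp_tx.c hunks above switch the HTT/WBM completion path from ieee80211_tx_status() to ieee80211_tx_status_ext() so the reporting station can be passed explicitly, using the peer id extracted from the completion descriptor with FIELD_GET(). A minimal sketch of that mac80211 call follows; demo_report_tx() is a hypothetical helper, and the driver above instead drops the frame when the peer can no longer be found.

#include <net/mac80211.h>

static void demo_report_tx(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			   struct sk_buff *msdu)
{
	struct ieee80211_tx_status status = { 0 };

	status.sta = sta;
	status.info = IEEE80211_SKB_CB(msdu);	/* tx_info stored in the skb cb */
	status.skb = msdu;

	ieee80211_tx_status_ext(hw, &status);
}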