author     Prabhakar Kushwaha <pkushwaha@marvell.com>   2021-10-04 09:58:44 +0300
committer  David S. Miller <davem@davemloft.net>        2021-10-04 12:55:49 +0100
commit     fe40a830dcded26f012739fd6dac0da9c805bc38
tree       6355d794adc22dc49de58d8f2f2154962e995543
parent     f2a74107f1e1b43621da2220fa835b965d3eb332
qed: Update qed_hsi.h for fw 8.59.1.0
The qed_hsi.h header has been updated to support the new FW version 8.59.1.0
with the following changes:
- Update FW HSI (Hardware Software Interface) structures.
- Add and update function declarations and defines as per the HSI.
- Add generic infrastructure for FW error reporting as part of
common event queue handling.
- Move malicious VF error reporting to FW error reporting
infrastructure.
- Move consolidation queue initialization from FW context to ramrod
message.
The qed_hsi.h header changes require corresponding changes in many other
files to keep them compiling.
This patch also fixes existing checkpatch warnings and a few important
checks.
Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
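For orientation: the generic FW error reporting mentioned above replaces the
dedicated COMMON_EVENT_MALICIOUS_VF opcode with a COMMON_EVENT_FW_ERROR event
whose payload (struct fw_err_data) carries a recovery scope and an entity id;
function-scoped errors whose entity id lies beyond the PF range are routed to
the malicious-VF path. The stand-alone C sketch below models only that
dispatch shape; the MAX_NUM_PFS value and the handler name are illustrative
stand-ins, not the driver's real HSI constants or callbacks.

```c
/* Sketch (not driver code) of the COMMON_EVENT_FW_ERROR dispatch idea. */
#include <stdint.h>
#include <stdio.h>

enum common_event_opcode { COMMON_EVENT_VF_PF_CHANNEL, COMMON_EVENT_FW_ERROR };
enum fw_err_recovery_scope { ERR_SCOPE_INVALID, ERR_SCOPE_FUNC, ERR_SCOPE_PORT };

#define MAX_NUM_PFS 16		/* illustrative, not the chip's real PF count */

struct fw_err_data {
	uint8_t recovery_scope;
	uint8_t err_id;
	uint16_t entity_id;
};

static int handle_malicious_vf(const struct fw_err_data *err)
{
	printf("malicious VF: entity %u, err id %u\n",
	       (unsigned int)err->entity_id, (unsigned int)err->err_id);
	return 0;
}

static int common_eqe_event(uint8_t opcode, const struct fw_err_data *err)
{
	switch (opcode) {
	case COMMON_EVENT_FW_ERROR:
		/* Function scope + entity id past the PF range => a VF misbehaved. */
		if (err->recovery_scope == ERR_SCOPE_FUNC &&
		    err->entity_id >= MAX_NUM_PFS)
			return handle_malicious_vf(err);
		fprintf(stderr, "unexpected FW error, scope %u\n",
			(unsigned int)err->recovery_scope);
		return -1;
	default:
		fprintf(stderr, "unknown eqe opcode 0x%02x\n", (unsigned int)opcode);
		return -1;
	}
}

int main(void)
{
	struct fw_err_data err = {
		.recovery_scope = ERR_SCOPE_FUNC,
		.err_id = 1,
		.entity_id = 20,	/* 20 >= MAX_NUM_PFS, so routed as a VF error */
	};

	return common_eqe_event(COMMON_EVENT_FW_ERROR, &err);
}
```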
 drivers/net/ethernet/qlogic/qed/qed_dev.c           |  112
 drivers/net/ethernet/qlogic/qed/qed_hsi.h           | 1556
 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c |   14
 drivers/net/ethernet/qlogic/qed/qed_l2.c            |    6
 drivers/net/ethernet/qlogic/qed/qed_l2.h            |    1
 drivers/net/ethernet/qlogic/qed/qed_sp.h            |    8
 drivers/net/ethernet/qlogic/qed/qed_sp_commands.c   |   10
 drivers/net/ethernet/qlogic/qed/qed_spq.c           |   50
 drivers/net/ethernet/qlogic/qed/qed_sriov.c         |  112
 drivers/net/ethernet/qlogic/qed/qed_sriov.h         |   27
 include/linux/qed/eth_common.h                      |    1
 include/linux/qed/rdma_common.h                     |    1
 12 files changed, 1590 insertions(+), 308 deletions(-)
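One recurring cleanup in the qed_dev.c hunks below is moving the large
struct qed_qm_common_rt_init_params off the stack: qed_hw_init_common() now
allocates it with kzalloc(), converts its early returns into goto out, and
frees the buffer on a single exit path. The minimal user-space C stand-in
below shows that shape only (calloc()/free() in place of kzalloc()/kfree();
the struct and helper names are placeholders, not the driver's API):

```c
#include <errno.h>
#include <stdlib.h>

/* Placeholder for a structure too large to keep on the stack. */
struct big_init_params {
	unsigned int port_params[64];
	unsigned int rl_params[256];
};

static int program_hw(const struct big_init_params *p)
{
	/* Stand-in for qed_qm_common_rt_init() and the register writes. */
	return p->port_params[0] ? 0 : -EINVAL;
}

static int hw_init_common(void)
{
	struct big_init_params *params;
	int rc;

	params = calloc(1, sizeof(*params));	/* kzalloc(..., GFP_KERNEL) in the driver */
	if (!params)
		return -ENOMEM;

	params->port_params[0] = 1;

	rc = program_hw(params);
	if (rc)
		goto out;			/* was "return rc" before the conversion */

	/* ... further init steps that may also jump to out ... */

out:
	free(params);				/* single exit path releases the buffer */
	return rc;
}

int main(void)
{
	return hw_init_common() ? 1 : 0;
}
```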
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3db1a5512b9b..dad5cd219b0e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1397,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev) qed_rdma_info_free(p_hwfn); } + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); qed_dbg_user_data_free(p_hwfn); - qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem); + qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem); /* Destroy doorbell recovery mechanism */ qed_db_recovery_teardown(p_hwfn); @@ -1484,8 +1485,8 @@ static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); /* num RLs can't exceed resource amount of rls or vports */ - num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), - RESC_NUM(p_hwfn, QED_VPORT)); + num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL), + RESC_NUM(p_hwfn, QED_VPORT)); /* Make sure after we reserve there's something left */ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) @@ -1533,8 +1534,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn) bool four_port; /* pq and vport bases for this PF */ - qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); - qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); + qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); + qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); /* rate limiting and weighted fair queueing are always enabled */ qm_info->vport_rl_en = true; @@ -1629,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) */ /* flags for pq init */ -#define PQ_INIT_SHARE_VPORT (1 << 0) -#define PQ_INIT_PF_RL (1 << 1) -#define PQ_INIT_VF_RL (1 << 2) +#define PQ_INIT_SHARE_VPORT BIT(0) +#define PQ_INIT_PF_RL BIT(1) +#define PQ_INIT_VF_RL BIT(2) /* defines for pq init */ #define PQ_INIT_DEFAULT_WRR_GROUP 1 @@ -2291,7 +2292,7 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_no_mem; } - rc = qed_eq_alloc(p_hwfn, (u16) n_eqes); + rc = qed_eq_alloc(p_hwfn, (u16)n_eqes); if (rc) goto alloc_err; @@ -2376,6 +2377,49 @@ alloc_err: return rc; } +static int qed_fw_err_handler(struct qed_hwfn *p_hwfn, + u8 opcode, + u16 echo, + union event_ring_data *data, u8 fw_return_code) +{ + if (fw_return_code != COMMON_ERR_CODE_ERROR) + goto eqe_unexpected; + + if (data->err_data.recovery_scope == ERR_SCOPE_FUNC && + le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) { + qed_sriov_vfpf_malicious(p_hwfn, &data->err_data); + return 0; + } + +eqe_unexpected: + DP_ERR(p_hwfn, + "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n", + opcode, fw_return_code, echo); + return -EINVAL; +} + +static int qed_common_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, + u8 fw_return_code) +{ + switch (opcode) { + case COMMON_EVENT_VF_PF_CHANNEL: + case COMMON_EVENT_VF_FLR: + return qed_sriov_eqe_event(p_hwfn, opcode, echo, data, + fw_return_code); + case COMMON_EVENT_FW_ERROR: + return qed_fw_err_handler(p_hwfn, opcode, + le16_to_cpu(echo), data, + fw_return_code); + default: + DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n", + opcode, echo); + return -EINVAL; + } +} + void qed_resc_setup(struct qed_dev *cdev) { int i; @@ -2404,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev) qed_l2_setup(p_hwfn); qed_iov_setup(p_hwfn); + qed_spq_register_async_cb(p_hwfn, 
PROTOCOLID_COMMON, + qed_common_eqe_event); #ifdef CONFIG_QED_LL2 if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn); @@ -2593,7 +2639,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, cache_line_size); } - if (L1_CACHE_BYTES > wr_mbs) + if (wr_mbs < L1_CACHE_BYTES) DP_INFO(p_hwfn, "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", L1_CACHE_BYTES, wr_mbs); @@ -2609,13 +2655,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int hw_mode) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; - struct qed_qm_common_rt_init_params params; + struct qed_qm_common_rt_init_params *params; struct qed_dev *cdev = p_hwfn->cdev; u8 vf_id, max_num_vfs; u16 num_pfs, pf_id; u32 concrete_fid; int rc = 0; + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) { + DP_NOTICE(p_hwfn->cdev, + "Failed to allocate common init params\n"); + + return -ENOMEM; + } + qed_init_cau_rt_data(cdev); /* Program GTT windows */ @@ -2628,16 +2682,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qm_info->pf_wfq_en = true; } - memset(¶ms, 0, sizeof(params)); - params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; - params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; - params.pf_rl_en = qm_info->pf_rl_en; - params.pf_wfq_en = qm_info->pf_wfq_en; - params.global_rl_en = qm_info->vport_rl_en; - params.vport_wfq_en = qm_info->vport_wfq_en; - params.port_params = qm_info->qm_port_params; + params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; + params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; + params->pf_rl_en = qm_info->pf_rl_en; + params->pf_wfq_en = qm_info->pf_wfq_en; + params->global_rl_en = qm_info->vport_rl_en; + params->vport_wfq_en = qm_info->vport_wfq_en; + params->port_params = qm_info->qm_port_params; - qed_qm_common_rt_init(p_hwfn, ¶ms); + qed_qm_common_rt_init(p_hwfn, params); qed_cxt_hw_init_common(p_hwfn); @@ -2645,7 +2698,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); if (rc) - return rc; + goto out; qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); @@ -2664,7 +2717,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, max_num_vfs = QED_IS_AH(cdev) ? 
MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); - qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); @@ -2673,6 +2726,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, /* pretend to original PF */ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); +out: + kfree(params); + return rc; } @@ -2785,7 +2841,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_rdma_dpm_bar(p_hwfn, p_ptt); } - p_hwfn->wid_count = (u16) n_cpus; + p_hwfn->wid_count = (u16)n_cpus; DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", @@ -3504,8 +3560,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) static void get_function_id(struct qed_hwfn *p_hwfn) { /* ME Register */ - p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, - PXP_PF_ME_OPAQUE_ADDR); + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, + PXP_PF_ME_OPAQUE_ADDR); p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); @@ -3671,12 +3727,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type) return qed_hsi_def_val[type][chip_id]; } + static int qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resc_max_val, mcp_resp; u8 res_id; int rc; + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { switch (res_id) { case QED_LL2_RAM_QUEUE: @@ -3922,7 +3980,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) * resources allocation queries should be atomic. Since several PFs can * run in parallel - a resource lock is needed. * If either the resource lock or resource set value commands are not - * supported - skip the the max values setting, release the lock if + * supported - skip the max values setting, release the lock if * needed, and proceed to the queries. Other failures, including a * failure to acquire the lock, will cause this function to fail. */ @@ -4776,7 +4834,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { u16 min, max; - min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); + min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); DP_NOTICE(p_hwfn, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 68eaef8ab6e8..f2cedbd9489c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. 
*/ #ifndef _QED_HSI_H @@ -38,7 +38,7 @@ enum common_event_opcode { COMMON_EVENT_VF_PF_CHANNEL, COMMON_EVENT_VF_FLR, COMMON_EVENT_PF_UPDATE, - COMMON_EVENT_MALICIOUS_VF, + COMMON_EVENT_FW_ERROR, COMMON_EVENT_RL_UPDATE, COMMON_EVENT_EMPTY, MAX_COMMON_EVENT_OPCODE @@ -84,6 +84,13 @@ enum core_l4_pseudo_checksum_mode { MAX_CORE_L4_PSEUDO_CHECKSUM_MODE }; +/* LL2 SP error code */ +enum core_ll2_error_code { + LL2_OK = 0, + LL2_ERROR, + MAX_CORE_LL2_ERROR_CODE +}; + /* Light-L2 RX Producers in Tstorm RAM */ struct core_ll2_port_stats { struct regpair gsi_invalid_hdr; @@ -123,6 +130,15 @@ struct core_ll2_ustorm_per_queue_stat { struct regpair rcv_bcast_pkts; }; +struct core_ll2_rx_per_queue_stat { + struct core_ll2_tstorm_per_queue_stat tstorm_stat; + struct core_ll2_ustorm_per_queue_stat ustorm_stat; +}; + +struct core_ll2_tx_per_queue_stat { + struct core_ll2_pstorm_per_queue_stat pstorm_stat; +}; + /* Structure for doorbell data, in PWM mode, for RX producers update. */ struct core_pwm_prod_update_data { __le16 icid; /* internal CID */ @@ -135,6 +151,15 @@ struct core_pwm_prod_update_data { struct core_ll2_rx_prod prod; /* Producers */ }; +/* Ramrod data for rx/tx queue statistics query ramrod */ +struct core_queue_stats_query_ramrod_data { + u8 rx_stat; + u8 tx_stat; + __le16 reserved[3]; + struct regpair rx_stat_addr; + struct regpair tx_stat_addr; +}; + /* Core Ramrod Command IDs (light L2) */ enum core_ramrod_cmd_id { CORE_RAMROD_UNUSED, @@ -210,7 +235,8 @@ struct core_rx_fast_path_cqe { __le16 vlan; struct core_rx_cqe_opaque_data opaque_data; struct parsing_err_flags err_flags; - __le16 reserved0; + u8 packet_source; + u8 reserved0; __le32 reserved1[3]; }; @@ -226,7 +252,8 @@ struct core_rx_gsi_offload_cqe { __le16 qp_id; __le32 src_qp; struct core_rx_cqe_opaque_data opaque_data; - __le32 reserved; + u8 packet_source; + u8 reserved[3]; }; /* Core RX CQE for Light L2 */ @@ -245,6 +272,15 @@ union core_rx_cqe_union { struct core_rx_slow_path_cqe rx_cqe_sp; }; +/* RX packet source. 
*/ +enum core_rx_pkt_source { + CORE_RX_PKT_SOURCE_NETWORK = 0, + CORE_RX_PKT_SOURCE_LB, + CORE_RX_PKT_SOURCE_TX, + CORE_RX_PKT_SOURCE_LL2_TX, + MAX_CORE_RX_PKT_SOURCE +}; + /* Ramrod data for rx queue start ramrod */ struct core_rx_start_ramrod_data { struct regpair bd_base; @@ -362,7 +398,7 @@ struct core_tx_update_ramrod_data { u8 update_qm_pq_id_flg; u8 reserved0; __le16 qm_pq_id; - __le32 reserved1; + __le32 reserved1[1]; }; /* Enum flag for what type of dcb data to update */ @@ -386,12 +422,10 @@ struct pstorm_core_conn_st_ctx { /* Core Slowpath Connection storm context of Xstorm */ struct xstorm_core_conn_st_ctx { - __le32 spq_base_lo; - __le32 spq_base_hi; - struct regpair consolid_base_addr; + struct regpair spq_base_addr; + __le32 reserved0[2]; __le16 spq_cons; - __le16 consolid_cons; - __le32 reserved0[55]; + __le16 reserved1[111]; }; struct xstorm_core_conn_ag_ctx { @@ -930,12 +964,12 @@ struct eth_rx_rate_limit { /* Update RSS indirection table entry command */ struct eth_tstorm_rss_update_data { - u8 valid; u8 vport_id; u8 ind_table_index; - u8 reserved; __le16 ind_table_value; __le16 reserved1; + u8 reserved; + u8 valid; }; struct eth_ustorm_per_pf_stat { @@ -967,19 +1001,20 @@ struct vf_pf_channel_eqe_data { struct regpair msg_addr; }; -/* Event Ring malicious VF data */ -struct malicious_vf_eqe_data { - u8 vf_id; - u8 err_id; - __le16 reserved[3]; -}; - /* Event Ring initial cleanup data */ struct initial_cleanup_eqe_data { u8 vf_id; u8 reserved[7]; }; +/* FW error data */ +struct fw_err_data { + u8 recovery_scope; + u8 err_id; + __le16 entity_id; + u8 reserved[4]; +}; + /* Event Data Union */ union event_ring_data { u8 bytes[8]; @@ -987,8 +1022,8 @@ union event_ring_data { struct iscsi_eqe_data iscsi_info; struct iscsi_connect_done_results iscsi_conn_done_info; union rdma_eqe_data rdma_data; - struct malicious_vf_eqe_data malicious_vf; struct initial_cleanup_eqe_data vf_init_cleanup; + struct fw_err_data err_data; }; /* Event Ring Entry */ @@ -1042,6 +1077,15 @@ struct hsi_fp_ver_struct { u8 major_ver_arr[2]; }; +/* Integration Phase */ +enum integ_phase { + INTEG_PHASE_BB_A0_LATEST = 3, + INTEG_PHASE_BB_B0_NO_MCP = 10, + INTEG_PHASE_BB_B0_WITH_MCP = 11, + MAX_INTEG_PHASE +}; + +/* Ports mode */ enum iwarp_ll2_tx_queues { IWARP_LL2_IN_ORDER_TX_QUEUE = 1, IWARP_LL2_ALIGNED_TX_QUEUE, @@ -1050,9 +1094,9 @@ enum iwarp_ll2_tx_queues { MAX_IWARP_LL2_TX_QUEUES }; -/* Malicious VF error ID */ -enum malicious_vf_error_id { - MALICIOUS_VF_NO_ERROR, +/* Function error ID */ +enum func_err_id { + FUNC_NO_ERROR, VF_PF_CHANNEL_NOT_READY, VF_ZONE_MSG_NOT_VALID, VF_ZONE_FUNC_NOT_ENABLED, @@ -1087,7 +1131,27 @@ enum malicious_vf_error_id { CORE_PACKET_SIZE_TOO_LARGE, CORE_ILLEGAL_BD_FLAGS, CORE_GSI_PACKET_VIOLATION, - MAX_MALICIOUS_VF_ERROR_ID, + MAX_FUNC_ERR_ID +}; + +/* FW error handling mode */ +enum fw_err_mode { + FW_ERR_FATAL_ASSERT, + FW_ERR_DRV_REPORT, + MAX_FW_ERR_MODE +}; + +/* FW error recovery scope */ +enum fw_err_recovery_scope { + ERR_SCOPE_INVALID, + ERR_SCOPE_TX_Q, + ERR_SCOPE_RX_Q, + ERR_SCOPE_QP, + ERR_SCOPE_VPORT, + ERR_SCOPE_FUNC, + ERR_SCOPE_PORT, + ERR_SCOPE_ENGINE, + MAX_FW_ERR_RECOVERY_SCOPE }; /* Mstorm non-triggering VF zone */ @@ -1148,7 +1212,7 @@ struct pf_start_tunnel_config { /* Ramrod data for PF start ramrod */ struct pf_start_ramrod_data { struct regpair event_ring_pbl_addr; - struct regpair consolid_q_pbl_addr; + struct regpair consolid_q_pbl_base_addr; struct pf_start_tunnel_config tunnel_config; __le16 event_ring_sb_id; u8 base_vf_id; @@ -1166,6 
+1230,9 @@ struct pf_start_ramrod_data { u8 reserved0; struct hsi_fp_ver_struct hsi_fp_ver; struct outer_tag_config_struct outer_tag_config; + u8 pf_fp_err_mode; + u8 consolid_q_num_pages; + u8 reserved[6]; }; /* Data for port update ramrod */ @@ -1230,6 +1297,13 @@ enum ports_mode { MAX_PORTS_MODE }; +/* Protocol-common error code */ +enum protocol_common_error_code { + COMMON_ERR_CODE_OK = 0, + COMMON_ERR_CODE_ERROR, + MAX_PROTOCOL_COMMON_ERROR_CODE +}; + /* use to index in hsi_fp_[major|minor]_ver_arr per protocol */ enum protocol_version_array_key { ETH_VER_KEY = 0, @@ -1704,6 +1778,7 @@ struct igu_msix_vector { #define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF #define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 }; + /* per encapsulation type enabling flags */ struct prs_reg_encapsulation_type_en { u8 flags; @@ -1881,6 +1956,9 @@ struct init_nig_pri_tc_map_req { /* QM per global RL init parameters */ struct init_qm_global_rl_params { + u8 type; + u8 reserved0; + u16 reserved1; u32 rate_limit; }; @@ -1895,18 +1973,33 @@ struct init_qm_port_params { /* QM per-PQ init parameters */ struct init_qm_pq_params { - u8 vport_id; + u16 vport_id; + u16 rl_id; + u8 rl_valid; u8 tc_id; u8 wrr_group; - u8 rl_valid; - u16 rl_id; u8 port_id; - u8 reserved; +}; + +/* QM per RL init parameters */ +struct init_qm_rl_params { + u32 vport_rl; + u8 vport_rl_type; + u8 reserved[3]; +}; + +/* QM Rate Limiter types */ +enum init_qm_rl_type { + QM_RL_TYPE_NORMAL, + QM_RL_TYPE_QCN, + MAX_INIT_QM_RL_TYPE }; /* QM per-vport init parameters */ struct init_qm_vport_params { u16 wfq; + u16 reserved; + u16 tc_wfq[NUM_OF_TCS]; u16 first_tx_pq_id[NUM_OF_TCS]; }; @@ -1965,14 +2058,14 @@ struct fw_info_location { }; enum init_modes { - MODE_RESERVED, + MODE_BB_A0_DEPRECATED, MODE_BB, MODE_K2, MODE_ASIC, - MODE_RESERVED2, - MODE_RESERVED3, - MODE_RESERVED4, - MODE_RESERVED5, + MODE_EMUL_REDUCED, + MODE_EMUL_FULL, + MODE_FPGA, + MODE_CHIPSIM, MODE_SF, MODE_MF_SD, MODE_MF_SI, @@ -1980,8 +2073,8 @@ enum init_modes { MODE_PORTS_PER_ENG_2, MODE_PORTS_PER_ENG_4, MODE_100G, - MODE_RESERVED6, - MODE_RESERVED7, + MODE_SKIP_PRAM_INIT, + MODE_EMUL_MAC, MAX_INIT_MODES }; @@ -2282,6 +2375,15 @@ struct iro { /* Win 13 */ #define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL +/* Returns the VOQ based on port and TC */ +#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \ + PURE_LB_TC ? NUM_OF_PHYS_TCS *\ + MAX_NUM_PORTS_BB + \ + (port) : (port) * \ + (max_phys_tcs_per_port) + (tc)) + +struct init_qm_pq_params; + /** * qed_qm_pf_mem_size(): Prepare QM ILT sizes. * @@ -2308,8 +2410,19 @@ struct qed_qm_common_rt_init_params { bool global_rl_en; bool vport_wfq_en; struct init_qm_port_params *port_params; + struct init_qm_global_rl_params + global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]; }; +/** + * qed_qm_common_rt_init(): Prepare QM runtime init values for the + * engine phase. + * + * @p_hwfn: HW device data. + * @p_params: Parameters. + * + * Return: 0 on success, -1 on error. + */ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, struct qed_qm_common_rt_init_params *p_params); @@ -2326,15 +2439,28 @@ struct qed_qm_pf_rt_init_params { u16 num_vf_pqs; u16 start_vport; u16 num_vports; + u16 start_rl; + u16 num_rls; u16 pf_wfq; u32 pf_rl; + u32 link_speed; struct init_qm_pq_params *pq_params; struct init_qm_vport_params *vport_params; + struct init_qm_rl_params *rl_params; }; +/** + * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @p_params: Parameters. 
+ * + * Return: 0 on success, -1 on error. + */ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_qm_pf_rt_init_params *p_params); + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params); /** * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF. @@ -2379,6 +2505,22 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq); /** + * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified + * VPORT and TC. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC. + * (filled by qed_qm_pf_rt_init). + * @weight: VPORT+TC WFQ weight. + * + * Return: 0 on success, -1 on error. + */ +int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 first_tx_pq_id, u16 weight); + +/** * qed_init_global_rl(): Initializes the rate limit of the specified * rate limiter. * @@ -2386,12 +2528,14 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, * @p_ptt: Ptt window used for writing the registers. * @rl_id: RL ID. * @rate_limit: Rate limit in Mb/sec units + * @vport_rl_type: Vport RL type. * * Return: 0 on success, -1 on error. */ int qed_init_global_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 rl_id, u32 rate_limit); + u16 rl_id, u32 rate_limit, + enum init_qm_rl_type vport_rl_type); /** * qed_send_qm_stop_cmd(): Sends a stop command to the QM. @@ -2627,7 +2771,19 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, * Return: Void. */ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, - struct phys_mem_desc *fw_overlay_mem); + struct phys_mem_desc **fw_overlay_mem); + +#define PCICFG_OFFSET 0x2000 +#define GRC_CONFIG_REG_PF_INIT_VF 0x624 + +/* First VF_NUM for PF is encoded in this register. + * The number of VFs assigned to a PF is assumed to be a multiple of 8. + * Software should program these bits based on Total Number of VFs programmed + * for each PF. 
+ * Since registers from 0x000-0x7ff are spilt across functions, each PF will + * have the same location for the same 4 bits + */ +#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff /* Runtime array offsets */ #define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 @@ -2958,116 +3114,118 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, #define QM_REG_TXPQMAP_RT_SIZE 512 #define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32068 +#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068 +#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32580 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 32580 +#define QM_REG_WFQVPMAP_RT_OFFSET 33092 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_PTRTBLTX_RT_OFFSET 33092 +#define QM_REG_PTRTBLTX_RT_OFFSET 33604 #define QM_REG_PTRTBLTX_RT_SIZE 1024 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628 #define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276 -#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900 #define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403 -#define 
CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466 -#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471 - -#define RUNTIME_ARRAY_SIZE 34472 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 3 |
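A smaller API tweak visible in the hunks above: qed_fw_overlay_mem_free() now
takes struct phys_mem_desc ** and is called with &p_hwfn->fw_overlay_mem, so
the helper can clear the caller's pointer after freeing it. The generic
illustration of that double-pointer idiom below uses an invented layout and
user-space allocation for the example; it is not qed's implementation.

```c
#include <stdio.h>
#include <stdlib.h>

struct phys_mem_desc {
	void *virt_addr;
	size_t size;
};

/* Free the descriptor and clear the caller's pointer so a second call,
 * e.g. from an error path, becomes a harmless no-op instead of a double free.
 */
static void overlay_mem_free(struct phys_mem_desc **mem)
{
	if (!mem || !*mem)
		return;

	free((*mem)->virt_addr);
	free(*mem);
	*mem = NULL;
}

int main(void)
{
	struct phys_mem_desc *overlay = calloc(1, sizeof(*overlay));

	if (!overlay)
		return 1;
	overlay->virt_addr = malloc(4096);
	overlay->size = 4096;

	overlay_mem_free(&overlay);	/* frees and NULLs 'overlay' */
	overlay_mem_free(&overlay);	/* safe: pointer is already NULL */

	printf("overlay is %s\n", overlay ? "set" : "NULL");
	return 0;
}
```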