Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/core/iwcm.c                      |  41
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c            |   3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c            |  12
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h            |   2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.h           |   6
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c            |   1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h            |   1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/roce_hsi.h            |  30
-rw-r--r--  drivers/infiniband/hw/efa/efa_com.c                 |  30
-rw-r--r--  drivers/infiniband/hw/efa/efa_main.c                |  30
-rw-r--r--  drivers/infiniband/hw/irdma/main.h                  |   2
-rw-r--r--  drivers/infiniband/hw/mana/device.c                 |   5
-rw-r--r--  drivers/infiniband/hw/mana/main.c                   | 109
-rw-r--r--  drivers/infiniband/hw/mana/mana_ib.h                | 128
-rw-r--r--  drivers/infiniband/hw/mana/qp.c                     | 198
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c     |   1
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c       |  85
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_ethtool.c  |  27
18 files changed, 622 insertions(+), 89 deletions(-)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 0301fcad4b48..1a6339f3a63f 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -143,8 +143,8 @@ static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
 
 	if (list_empty(&cm_id_priv->work_free_list))
 		return NULL;
-	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
-			  free_list);
+	work = list_first_entry(&cm_id_priv->work_free_list, struct iwcm_work,
+				free_list);
 	list_del_init(&work->free_list);
 	return work;
 }
@@ -206,17 +206,17 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
 
 /*
  * Release a reference on cm_id. If the last reference is being
- * released, free the cm_id and return 1.
+ * released, free the cm_id and return 'true'.
  */
-static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
+static bool iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
 {
 	if (refcount_dec_and_test(&cm_id_priv->refcount)) {
 		BUG_ON(!list_empty(&cm_id_priv->work_list));
 		free_cm_id(cm_id_priv);
-		return 1;
+		return true;
 	}
 
-	return 0;
+	return false;
 }
 
 static void add_ref(struct iw_cm_id *cm_id)
@@ -368,8 +368,10 @@ EXPORT_SYMBOL(iw_cm_disconnect);
  *
  * Clean up all resources associated with the connection and release
  * the initial reference taken by iw_create_cm_id.
+ *
+ * Returns true if and only if the last cm_id_priv reference has been dropped.
  */
-static void destroy_cm_id(struct iw_cm_id *cm_id)
+static bool destroy_cm_id(struct iw_cm_id *cm_id)
 {
 	struct iwcm_id_private *cm_id_priv;
 	struct ib_qp *qp;
@@ -439,7 +441,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
 		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
 	}
 
-	(void)iwcm_deref_id(cm_id_priv);
+	return iwcm_deref_id(cm_id_priv);
 }
 
 /*
@@ -450,7 +452,8 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
  */
 void iw_destroy_cm_id(struct iw_cm_id *cm_id)
 {
-	destroy_cm_id(cm_id);
+	if (!destroy_cm_id(cm_id))
+		flush_workqueue(iwcm_wq);
 }
 EXPORT_SYMBOL(iw_destroy_cm_id);
 
@@ -1017,16 +1020,13 @@ static void cm_work_handler(struct work_struct *_work)
 	struct iw_cm_event levent;
 	struct iwcm_id_private *cm_id_priv = work->cm_id;
 	unsigned long flags;
-	int empty;
 	int ret = 0;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	empty = list_empty(&cm_id_priv->work_list);
-	while (!empty) {
-		work = list_entry(cm_id_priv->work_list.next,
-				  struct iwcm_work, list);
+	while (!list_empty(&cm_id_priv->work_list)) {
+		work = list_first_entry(&cm_id_priv->work_list,
+					struct iwcm_work, list);
 		list_del_init(&work->list);
-		empty = list_empty(&cm_id_priv->work_list);
 		levent = work->event;
 		put_work(work);
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -1034,13 +1034,11 @@ static void cm_work_handler(struct work_struct *_work)
 		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
 			ret = process_event(cm_id_priv, &levent);
 			if (ret)
-				destroy_cm_id(&cm_id_priv->id);
+				WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
 		} else
 			pr_debug("dropping event %d\n", levent.event);
 		if (iwcm_deref_id(cm_id_priv))
 			return;
-		if (empty)
-			return;
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -1093,11 +1091,8 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
 	}
 
 	refcount_inc(&cm_id_priv->refcount);
-	if (list_empty(&cm_id_priv->work_list)) {
-		list_add_tail(&work->list, &cm_id_priv->work_list);
-		queue_work(iwcm_wq, &work->work);
-	} else
-		list_add_tail(&work->list, &cm_id_priv->work_list);
+	list_add_tail(&work->list, &cm_id_priv->work_list);
+	queue_work(iwcm_wq, &work->work);
 out:
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 	return ret;
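The list_first_entry() conversion above is cosmetic: it names the intent (take the head element of a known-non-empty list) rather than open-coding list_entry() on ->next. The functional changes are in iw_destroy_cm_id(), which now flushes iwcm_wq when the caller did not drop the last reference, and in cm_event_handler(), which relies on queue_work() being a no-op when the work item is already pending. A minimal userspace sketch of the list_first_entry() idiom, using simplified stand-ins for the kernel macros (struct iwcm_work_demo is invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* simplified stand-ins for the kernel's container_of()/list_first_entry() */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_first_entry(head, type, member) \
            container_of((head)->next, type, member)

    struct iwcm_work_demo {
            int event;
            struct list_head free_list;
    };

    int main(void)
    {
            struct list_head head;
            struct iwcm_work_demo w = { .event = 42 };

            /* single-element list: head <-> w.free_list */
            head.next = head.prev = &w.free_list;
            w.free_list.next = w.free_list.prev = &head;

            struct iwcm_work_demo *got =
                    list_first_entry(&head, struct iwcm_work_demo, free_list);
            printf("event=%d\n", got->event); /* prints event=42 */
            return 0;
    }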
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index ce9c5bae83bf..d261b09025ca 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -4201,6 +4201,9 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 	if (rdev->pacing.dbr_pacing)
 		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
 
+	if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
+		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED;
+
 	if (udata->inlen >= sizeof(ureq)) {
 		rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
 		if (rc)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 04258676d072..49e4a4a50bfa 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -984,7 +984,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	u16 nsge;
 
 	if (res->dattr)
-		qp->dev_cap_flags = res->dattr->dev_cap_flags;
+		qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
 
 	sq->dbinfo.flags = 0;
 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
@@ -1002,7 +1002,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 		 sizeof(struct sq_psn_search_ext) :
 		 sizeof(struct sq_psn_search);
 
-	if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
+	if (qp->is_host_msn_tbl) {
 		psn_sz = sizeof(struct sq_msn_search);
 		qp->msn = 0;
 	}
@@ -1016,7 +1016,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode) : 0;
 	/* Update msn tbl size */
-	if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
+	if (qp->is_host_msn_tbl && psn_sz) {
 		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
 		qp->msn_tbl_sz = hwq_attr.aux_depth;
 		qp->msn = 0;
 	}
@@ -1637,7 +1637,7 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
 	if (!swq->psn_search)
 		return;
 	/* Handle MSN differently on cap flags */
-	if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
+	if (qp->is_host_msn_tbl) {
 		bnxt_qplib_fill_msn_search(qp, wqe, swq);
 		return;
 	}
@@ -1819,7 +1819,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 	}
 
 	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
-	bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
+	bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
 
 	idx = 0;
 	swq->slot_idx = hwq->prod;
@@ -2009,7 +2009,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 		rc = -EINVAL;
 		goto done;
 	}
-	if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
+	if (!qp->is_host_msn_tbl || msn_update) {
 		swq->next_psn = sq->psn & BTH_PSN_MASK;
 		bnxt_qplib_fill_psn_search(qp, wqe, swq);
 	}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 7fd4506b3584..4aaac84c1b1b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -340,7 +340,7 @@ struct bnxt_qplib_qp {
 	struct list_head rq_flush;
 	u32 msn;
 	u32 msn_tbl_sz;
-	u16 dev_cap_flags;
+	bool is_host_msn_tbl;
 };
 
 #define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
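For context on the aux_depth change above: with the host MSN table enabled, the table is sized to the SQ depth rounded up to a power of two. A standalone sketch of that sizing rule (userspace C; the depth value is hypothetical and roundup_pow_of_two() is reimplemented here for illustration):

    #include <stdio.h>

    static unsigned int roundup_pow_of_two_u(unsigned int n)
    {
            unsigned int p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned int sq_depth = 300; /* hypothetical SQ slot count */
            unsigned int msn_tbl_sz = roundup_pow_of_two_u(sq_depth);

            printf("msn_tbl_sz = %u\n", msn_tbl_sz); /* prints 512 */
            return 0;
    }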
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 61628f7f1253..a0f78cde314f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -554,6 +554,12 @@ static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
 
 #define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
 
+static inline bool _is_host_msn_table(u16 dev_cap_ext_flags2)
+{
+	return (dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK) ==
+	       CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE;
+}
+
 static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
 {
 	return cctx->modes.dbr_pacing;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 8beeedd15061..9328db92fa6d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -156,6 +156,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 			    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
 	attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
 	attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
+	attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
 
 	bnxt_qplib_query_version(rcfw, attr->fw_ver);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index d33c78b96217..16a67d70a6fc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -72,6 +72,7 @@ struct bnxt_qplib_dev_attr {
 	u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
 	bool is_atomic;
 	u16 dev_cap_flags;
+	u16 dev_cap_flags2;
 	u32 max_dpi;
 };
 
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 605c9463c408..042530969505 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2157,8 +2157,36 @@ struct creq_query_func_resp_sb {
 	__le32 tqm_alloc_reqs[12];
 	__le32 max_dpi;
 	u8 max_sge_var_wqe;
-	u8 reserved_8;
+	u8 dev_cap_ext_flags;
+	#define CREQ_QUERY_FUNC_RESP_SB_ATOMIC_OPS_NOT_SUPPORTED     0x1UL
+	#define CREQ_QUERY_FUNC_RESP_SB_DRV_VERSION_RGTR_SUPPORTED   0x2UL
+	#define CREQ_QUERY_FUNC_RESP_SB_CREATE_QP_BATCH_SUPPORTED    0x4UL
+	#define CREQ_QUERY_FUNC_RESP_SB_DESTROY_QP_BATCH_SUPPORTED   0x8UL
+	#define CREQ_QUERY_FUNC_RESP_SB_ROCE_STATS_EXT_CTX_SUPPORTED 0x10UL
+	#define CREQ_QUERY_FUNC_RESP_SB_CREATE_SRQ_SGE_SUPPORTED     0x20UL
+	#define CREQ_QUERY_FUNC_RESP_SB_FIXED_SIZE_WQE_DISABLED      0x40UL
+	#define CREQ_QUERY_FUNC_RESP_SB_DCN_SUPPORTED                0x80UL
 	__le16 max_inline_data_var_wqe;
+	__le32 start_qid;
+	u8 max_msn_table_size;
+	u8 reserved8_1;
+	__le16 dev_cap_ext_flags_2;
+	#define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED      0x1UL
+	#define CREQ_QUERY_FUNC_RESP_SB_CHANGE_UDP_SRC_PORT_WQE_SUPPORTED 0x2UL
+	#define CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED           0x4UL
+	#define CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED        0x8UL
+	#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK   0x30UL
+	#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_SFT    4
+	#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_PSN_TABLE (0x0UL << 4)
+	#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE (0x1UL << 4)
+	#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE  (0x2UL << 4)
+	#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
+		CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+	__le16 max_xp_qp_size;
+	__le16 create_qp_batch_size;
+	__le16 destroy_qp_batch_size;
+	__le16 reserved16;
+	__le64 reserved64;
 };
 
 /* cmdq_set_func_resources (size:448b/56B) */
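The retransmission-support capability added to creq_query_func_resp_sb is a two-bit field, not a single flag, which is why _is_host_msn_table() compares against the masked value rather than testing a bit. A standalone sketch of the extraction (userspace C with shortened names; the flags value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* shortened stand-ins for the CREQ_QUERY_FUNC_RESP_SB_* constants */
    #define RETX_SUPPORT_MASK            0x30u
    #define RETX_SUPPORT_SFT             4
    #define RETX_SUPPORT_HOST_PSN_TABLE  (0x0u << 4)
    #define RETX_SUPPORT_HOST_MSN_TABLE  (0x1u << 4)
    #define RETX_SUPPORT_IQM_MSN_TABLE   (0x2u << 4)

    static int is_host_msn_table(uint16_t flags2)
    {
            return (flags2 & RETX_SUPPORT_MASK) == RETX_SUPPORT_HOST_MSN_TABLE;
    }

    int main(void)
    {
            uint16_t flags2 = 0x1c; /* hypothetical: bits 5:4 = 01b */

            printf("host MSN table: %d\n", is_host_msn_table(flags2)); /* 1 */
            printf("scheme index: %u\n",
                   (flags2 & RETX_SUPPORT_MASK) >> RETX_SUPPORT_SFT); /* 1 */
            return 0;
    }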
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 16a24a05fc2a..bafd210dd43e 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
 /*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
  */
 
 #include "efa_com.h"
@@ -406,8 +406,8 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue
 	return comp_ctx;
 }
 
-static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
-						   struct efa_admin_acq_entry *cqe)
+static int efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
+						  struct efa_admin_acq_entry *cqe)
 {
 	struct efa_comp_ctx *comp_ctx;
 	u16 cmd_id;
@@ -416,11 +416,11 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
 			EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
 
 	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
-	if (!comp_ctx) {
+	if (comp_ctx->status != EFA_CMD_SUBMITTED) {
 		ibdev_err(aq->efa_dev,
-			  "comp_ctx is NULL. Changing the admin queue running state\n");
-		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
-		return;
+			  "Received completion with unexpected command id[%d], sq producer: %d, sq consumer: %d, cq consumer: %d\n",
+			  cmd_id, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+		return -EINVAL;
 	}
 
 	comp_ctx->status = EFA_CMD_COMPLETED;
@@ -428,14 +428,17 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
 
 	if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
 		complete(&comp_ctx->wait_event);
+
+	return 0;
 }
 
 static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
 {
 	struct efa_admin_acq_entry *cqe;
 	u16 queue_size_mask;
-	u16 comp_num = 0;
+	u16 comp_cmds = 0;
 	u8 phase;
+	int err;
 	u16 ci;
 
 	queue_size_mask = aq->depth - 1;
@@ -453,10 +456,12 @@ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
 		 * phase bit was validated
 		 */
 		dma_rmb();
-		efa_com_handle_single_admin_completion(aq, cqe);
+		err = efa_com_handle_single_admin_completion(aq, cqe);
+		if (!err)
+			comp_cmds++;
 
+		aq->cq.cc++;
 		ci++;
-		comp_num++;
 		if (ci == aq->depth) {
 			ci = 0;
 			phase = !phase;
@@ -465,10 +470,9 @@ static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
 		cqe = &aq->cq.entries[ci];
 	}
 
-	aq->cq.cc += comp_num;
 	aq->cq.phase = phase;
-	aq->sq.cc += comp_num;
-	atomic64_add(comp_num, &aq->stats.completed_cmd);
+	aq->sq.cc += comp_cmds;
+	atomic64_add(comp_cmds, &aq->stats.completed_cmd);
 }
 
 static int efa_com_comp_status_to_errno(u8 comp_status)
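The accounting fix above separates two counters that the old code conflated: every polled CQE consumes a CQ slot, but only a completion matched to a submitted command may free an SQ slot, so a spurious CQE can no longer desynchronize the SQ consumer index. A standalone sketch of that rule (userspace C; the struct is hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_aq {
            unsigned int sq_cc; /* SQ consumer: commands actually completed */
            unsigned int cq_cc; /* CQ consumer: CQEs consumed, valid or not */
    };

    static void handle_cqe(struct demo_aq *aq, bool matches_submitted_cmd)
    {
            if (matches_submitted_cmd)
                    aq->sq_cc++; /* frees an SQ slot */
            aq->cq_cc++;         /* the CQE slot is consumed either way */
    }

    int main(void)
    {
            struct demo_aq aq = {0};

            handle_cqe(&aq, true);
            handle_cqe(&aq, false); /* unexpected/spurious completion */
            handle_cqe(&aq, true);
            printf("sq.cc=%u cq.cc=%u\n", aq.sq_cc, aq.cq_cc); /* 2 and 3 */
            return 0;
    }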
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index d1a48f988f6c..99f9ac23c721 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -190,15 +190,23 @@ static int efa_request_doorbell_bar(struct efa_dev *dev)
 {
 	u8 db_bar_idx = dev->dev_attr.db_bar;
 	struct pci_dev *pdev = dev->pdev;
-	int bars;
+	int pci_mem_bars;
+	int db_bar;
 	int err;
 
-	if (!(BIT(db_bar_idx) & EFA_BASE_BAR_MASK)) {
-		bars = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(db_bar_idx);
+	db_bar = BIT(db_bar_idx);
+	if (!(db_bar & EFA_BASE_BAR_MASK)) {
+		pci_mem_bars = pci_select_bars(pdev, IORESOURCE_MEM);
+		if (db_bar & ~pci_mem_bars) {
+			dev_err(&pdev->dev,
+				"Doorbells BAR unavailable. Requested %#x, available %#x\n",
+				db_bar, pci_mem_bars);
+			return -ENODEV;
+		}
 
-		err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
+		err = pci_request_selected_regions(pdev, db_bar, DRV_MODULE_NAME);
 		if (err) {
-			dev_err(&dev->pdev->dev,
+			dev_err(&pdev->dev,
 				"pci_request_selected_regions for bar %d failed %d\n",
 				db_bar_idx, err);
 			return err;
@@ -531,7 +539,7 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
 {
 	struct efa_com_dev *edev;
 	struct efa_dev *dev;
-	int bars;
+	int pci_mem_bars;
 	int err;
 
 	err = pci_enable_device_mem(pdev);
@@ -556,8 +564,14 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
 	dev->pdev = pdev;
 	xa_init(&dev->cqs_xa);
 
-	bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
-	err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
+	pci_mem_bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	if (EFA_BASE_BAR_MASK & ~pci_mem_bars) {
+		dev_err(&pdev->dev, "BARs unavailable. Requested %#x, available %#x\n",
+			(int)EFA_BASE_BAR_MASK, pci_mem_bars);
+		err = -ENODEV;
+		goto err_ibdev_destroy;
+	}
+	err = pci_request_selected_regions(pdev, EFA_BASE_BAR_MASK, DRV_MODULE_NAME);
 	if (err) {
 		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
 			err);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index b65bc2ea542f..9f0ed6e84471 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -239,7 +239,7 @@ struct irdma_qv_info {
 
 struct irdma_qvlist_info {
 	u32 num_vectors;
-	struct irdma_qv_info qv_info[];
+	struct irdma_qv_info qv_info[] __counted_by(num_vectors);
 };
 
 struct irdma_gen_ops {
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index 7e09ceb3da53..b07a8e2e838f 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -5,6 +5,7 @@
 
 #include "mana_ib.h"
 #include <net/mana/mana_auxiliary.h>
+#include <net/addrconf.h>
 
 MODULE_DESCRIPTION("Microsoft Azure Network Adapter IB driver");
 MODULE_LICENSE("GPL");
@@ -92,6 +93,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
 		goto free_ib_device;
 	}
 	ether_addr_copy(mac_addr, upper_ndev->dev_addr);
+	addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, upper_ndev->dev_addr);
 	ret = ib_device_set_netdev(&dev->ib_dev, upper_ndev, 1);
 	rcu_read_unlock();
 	if (ret) {
@@ -124,6 +126,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
 	if (ret)
 		goto destroy_eqs;
 
+	xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);
 	ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
 	if (ret) {
 		ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
@@ -141,6 +144,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
 	return 0;
 
 destroy_rnic:
+	xa_destroy(&dev->qp_table_wq);
 	mana_ib_gd_destroy_rnic_adapter(dev);
 destroy_eqs:
 	mana_ib_destroy_eqs(dev);
@@ -156,6 +160,7 @@ static void mana_ib_remove(struct auxiliary_device *adev)
 	struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
 
 	ib_unregister_device(&dev->ib_dev);
+	xa_destroy(&dev->qp_table_wq);
 	mana_ib_gd_destroy_rnic_adapter(dev);
 	mana_ib_destroy_eqs(dev);
 	mana_gd_deregister_device(dev->gdma_dev);
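addrconf_addr_eui48() fills the node GUID by expanding the 48-bit MAC into a modified EUI-64: the FF:FE marker is inserted in the middle and the universal/local bit is flipped, per RFC 4291 Appendix A. A standalone re-implementation for illustration (the kernel helper differs only in types):

    #include <stdint.h>
    #include <stdio.h>

    static void addr_eui48(uint8_t eui[8], const uint8_t mac[6])
    {
            eui[0] = mac[0] ^ 0x02; /* flip the universal/local bit */
            eui[1] = mac[1];
            eui[2] = mac[2];
            eui[3] = 0xff;          /* EUI-64 expansion marker */
            eui[4] = 0xfe;
            eui[5] = mac[3];
            eui[6] = mac[4];
            eui[7] = mac[5];
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x15, 0x5d, 0x01, 0x02, 0x03 };
            uint8_t eui[8];
            int i;

            addr_eui48(eui, mac);
            for (i = 0; i < 8; i++)
                    printf("%02x%s", eui[i], i < 7 ? ":" : "\n");
            /* prints 02:15:5d:ff:fe:01:02:03 */
            return 0;
    }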
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 2a411357640e..d13abc954d2a 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -547,14 +547,27 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
 	struct mana_ib_dev *dev = container_of(ibdev,
 					       struct mana_ib_dev, ib_dev);
 
+	memset(props, 0, sizeof(*props));
+	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
+	props->page_size_cap = PAGE_SZ_BM;
 	props->max_qp = dev->adapter_caps.max_qp_count;
 	props->max_qp_wr = dev->adapter_caps.max_qp_wr;
+	props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
+	props->max_send_sge = dev->adapter_caps.max_send_sge_count;
+	props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;
+	props->max_sge_rd = dev->adapter_caps.max_recv_sge_count;
 	props->max_cq = dev->adapter_caps.max_cq_count;
 	props->max_cqe = dev->adapter_caps.max_qp_wr;
 	props->max_mr = dev->adapter_caps.max_mr_count;
-	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
-	props->max_send_sge = dev->adapter_caps.max_send_sge_count;
-	props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;
+	props->max_pd = dev->adapter_caps.max_pd_count;
+	props->max_qp_rd_atom = dev->adapter_caps.max_inbound_read_limit;
+	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
+	props->max_qp_init_rd_atom = dev->adapter_caps.max_outbound_read_limit;
+	props->atomic_cap = IB_ATOMIC_NONE;
+	props->masked_atomic_cap = IB_ATOMIC_NONE;
+	props->max_ah = INT_MAX;
+	props->max_pkeys = 1;
+	props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
 
 	return 0;
 }
@@ -654,6 +667,33 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
 	return 0;
 }
 
+static void
+mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
+{
+	struct mana_ib_dev *mdev = (struct mana_ib_dev *)ctx;
+	struct mana_ib_qp *qp;
+	struct ib_event ev;
+	u32 qpn;
+
+	switch (event->type) {
+	case GDMA_EQE_RNIC_QP_FATAL:
+		qpn = event->details[0];
+		qp = mana_get_qp_ref(mdev, qpn);
+		if (!qp)
+			break;
+		if (qp->ibqp.event_handler) {
+			ev.device = qp->ibqp.device;
+			ev.element.qp = &qp->ibqp;
+			ev.event = IB_EVENT_QP_FATAL;
+			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+		}
+		mana_put_qp_ref(qp);
+		break;
+	default:
+		break;
+	}
+}
+
 int mana_ib_create_eqs(struct mana_ib_dev *mdev)
 {
 	struct gdma_context *gc = mdev_to_gc(mdev);
@@ -663,7 +703,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
 	spec.type = GDMA_EQ;
 	spec.monitor_avl_buf = false;
 	spec.queue_size = EQ_SIZE;
-	spec.eq.callback = NULL;
+	spec.eq.callback = mana_ib_event_handler;
 	spec.eq.context = mdev;
 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
 	spec.eq.msix_index = 0;
@@ -678,7 +718,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
 		err = -ENOMEM;
 		goto destroy_fatal_eq;
 	}
-
+	spec.eq.callback = NULL;
 	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
 		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
 		err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
@@ -888,3 +928,62 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 
 	return 0;
 }
+
+int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
+			    struct ib_qp_init_attr *attr, u32 doorbell, u64 flags)
+{
+	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
+	struct mana_ib_pd *pd = container_of(qp->ibqp.pd, struct mana_ib_pd, ibpd);
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	struct mana_rnic_create_qp_resp resp = {};
+	struct mana_rnic_create_qp_req req = {};
+	int err, i;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.adapter = mdev->adapter_handle;
+	req.pd_handle = pd->pd_handle;
+	req.send_cq_handle = send_cq->cq_handle;
+	req.recv_cq_handle = recv_cq->cq_handle;
+	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++)
+		req.dma_region[i] = qp->rc_qp.queues[i].gdma_region;
+	req.doorbell_page = doorbell;
+	req.max_send_wr = attr->cap.max_send_wr;
+	req.max_recv_wr = attr->cap.max_recv_wr;
+	req.max_send_sge = attr->cap.max_send_sge;
+	req.max_recv_sge = attr->cap.max_recv_sge;
+	req.flags = flags;
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to create rc qp err %d", err);
+		return err;
+	}
+	qp->qp_handle = resp.rc_qp_handle;
+	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; i++) {
+		qp->rc_qp.queues[i].id = resp.queue_ids[i];
+		/* The GDMA regions are now owned by the RNIC QP handle */
+		qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
+	}
+	return 0;
+}
+
+int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+	struct mana_rnic_destroy_rc_qp_resp resp = {0};
+	struct mana_rnic_destroy_rc_qp_req req = {0};
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
+	req.hdr.dev_id = gc->mana_ib.dev_id;
+	req.adapter = mdev->adapter_handle;
+	req.rc_qp_handle = qp->qp_handle;
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to destroy rc qp err %d", err);
+		return err;
+	}
+	return 0;
+}
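The refcount/free pair introduced on struct mana_ib_qp (see the mana_ib.h diff below) enables lookup-safe teardown: remove the QP from the table, drop the table's reference, then wait until every concurrent mana_get_qp_ref() user, such as mana_ib_event_handler() above, has dropped its own. A kernel-context sketch, assuming a hypothetical helper name (the actual removal helper lives in the mana QP code, which is not part of this hunk):

    /* Hypothetical helper: not part of this diff. */
    static void demo_table_remove_qp(struct mana_ib_dev *mdev,
                                     struct mana_ib_qp *qp)
    {
            xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
            mana_put_qp_ref(qp);            /* drop the table's reference */
            wait_for_completion(&qp->free); /* wait out in-flight users */
    }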
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 68c3b4f0faa4..977da9569701 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -27,6 +27,11 @@
  */
 #define MANA_IB_MAX_MR		0xFFFFFFu
 
+/*
+ * The CA timeout is approx. 260ms (4us * 2^(DELAY))
+ */
+#define MANA_CA_ACK_DELAY	16
+
 struct mana_ib_adapter_caps {
 	u32 max_sq_id;
 	u32 max_rq_id;
@@ -57,6 +62,7 @@ struct mana_ib_dev {
 	mana_handle_t adapter_handle;
 	struct gdma_queue *fatal_err_eq;
 	struct gdma_queue **eqs;
+	struct xarray qp_table_wq;
 	struct mana_ib_adapter_caps adapter_caps;
 };
 
@@ -95,14 +101,33 @@ struct mana_ib_cq {
 	mana_handle_t cq_handle;
 };
 
+enum mana_rc_queue_type {
+	MANA_RC_SEND_QUEUE_REQUESTER = 0,
+	MANA_RC_SEND_QUEUE_RESPONDER,
+	MANA_RC_SEND_QUEUE_FMR,
+	MANA_RC_RECV_QUEUE_REQUESTER,
+	MANA_RC_RECV_QUEUE_RESPONDER,
+	MANA_RC_QUEUE_TYPE_MAX,
+};
+
+struct mana_ib_rc_qp {
+	struct mana_ib_queue queues[MANA_RC_QUEUE_TYPE_MAX];
+};
+
 struct mana_ib_qp {
 	struct ib_qp ibqp;
 
 	mana_handle_t qp_handle;
-	struct mana_ib_queue raw_sq;
+	union {
+		struct mana_ib_queue raw_sq;
+		struct mana_ib_rc_qp rc_qp;
+	};
 
 	/* The port on the IB device, starting with 1 */
 	u32 port;
+
+	refcount_t refcount;
+	struct completion free;
 };
 
 struct mana_ib_ucontext {
@@ -122,6 +147,9 @@ enum mana_ib_command_code {
 	MANA_IB_CONFIG_MAC_ADDR	= 0x30005,
 	MANA_IB_CREATE_CQ	= 0x30008,
 	MANA_IB_DESTROY_CQ	= 0x30009,
+	MANA_IB_CREATE_RC_QP	= 0x3000a,
+	MANA_IB_DESTROY_RC_QP	= 0x3000b,
+	MANA_IB_SET_QP_STATE	= 0x3000d,
 };
 
 struct mana_ib_query_adapter_caps_req {
@@ -230,11 +258,105 @@ struct mana_rnic_destroy_cq_resp {
 	struct gdma_resp_hdr hdr;
 }; /* HW Data */
 
+enum mana_rnic_create_rc_flags {
+	MANA_RC_FLAG_NO_FMR = 2,
+};
+
+struct mana_rnic_create_qp_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t adapter;
+	mana_handle_t pd_handle;
+	mana_handle_t send_cq_handle;
+	mana_handle_t recv_cq_handle;
+	u64 dma_region[MANA_RC_QUEUE_TYPE_MAX];
+	u64 deprecated[2];
+	u64 flags;
+	u32 doorbell_page;
+	u32 max_send_wr;
+	u32 max_recv_wr;
+	u32 max_send_sge;
+	u32 max_recv_sge;
+	u32 reserved;
+}; /* HW Data */
+
+struct mana_rnic_create_qp_resp {
+	struct gdma_resp_hdr hdr;
+	mana_handle_t rc_qp_handle;
+	u32 queue_ids[MANA_RC_QUEUE_TYPE_MAX];
+	u32 reserved;
+}; /* HW Data*/
+
+struct mana_rnic_destroy_rc_qp_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t adapter;
+	mana_handle_t rc_qp_handle;
+}; /* HW Data */
+
+struct mana_rnic_destroy_rc_qp_resp {
+	struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
+struct mana_ib_ah_attr {
+	u8 src_addr[16];
+	u8 dest_addr[16];
+	u8 src_mac[ETH_ALEN];
+	u8 dest_mac[ETH_ALEN];
+	u8 src_addr_type;
+	u8 dest_addr_type;
+	u8 hop_limit;
+	u8 traffic_class;
+	u16 src_port;
+	u16 dest_port;
+	u32 reserved;
+};
+
+struct mana_rnic_set_qp_state_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t adapter;
+	mana_handle_t qp_handle;
+	u64 attr_mask;
+	u32 qp_state;
+	u32 path_mtu;
+	u32 rq_psn;
+	u32 sq_psn;
+	u32 dest_qpn;
+	u32 max_dest_rd_atomic;
+	u32 retry_cnt;
+	u32 rnr_retry;
+	u32 min_rnr_timer;
+	u32 reserved;
+	struct mana_ib_ah_attr ah_attr;
+}; /* HW Data */
+
+struct mana_rnic_set_qp_state_resp {
+	struct gdma_resp_hdr hdr;
+}; /* HW Data */
+
 static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
 {
 	return mdev->gdma_dev->gdma_context;
 }
 
+static inline struct mana_ib_qp *mana_get_qp_ref(struct mana_ib_dev *mdev,
+						 uint32_t qid)
+{
+	struct mana_ib_qp *qp;
+	unsigned long flag;
+
+	xa_lock_irqsave(&mdev->qp_table_wq, flag);
+	qp = xa_load(&mdev->qp_table_wq, qid);
+	if (qp)
+		refcount_inc(&qp->refcount);
+	xa_unlock_irqrestore(&mdev->qp_table_wq, flag);
+	return qp;
+}
+
+static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
+{
+	if (refcount_dec_and_test(&qp->refcount))
+		complete(&qp->free);
+}
+
 static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
 {
 	struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
@@ -354,4 +476,8 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
 int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell);
 
 int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
+
+int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
+			    struct ib_qp_init_attr *attr, u32 doorbell, u64 flags);
+int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp);
 #endif
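The MANA_CA_ACK_DELAY comment checks out numerically: InfiniBand encodes the local CA ACK delay as 4.096 us * 2^n, so n = 16 gives about 268 ms, which the comment rounds to "approx. 260ms". A standalone verification (plain C, illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int n = 16;              /* MANA_CA_ACK_DELAY */
            double ms = 4.096e-3 * (1u << n); /* 4.096 us, in ms, times 2^n */

            printf("%.1f ms\n", ms); /* ~268.4 ms, i.e. roughly 260 ms */
            return 0;
    }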
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index ba13c5abf8ef..73d67c853b6f 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -21,7 +21,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
 
 	gc = mdev_to_gc(dev);
 
-	req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_SIZE);
+	req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_DEF_SIZE);
 	req = kzalloc(req_buf_size, GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
@@ -41,18 +41,18 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
 	if (log_ind_tbl_size)
 		req->rss_enable = true;
 
-	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
+	req->num_indir_entries = MANA_INDIRECT_TABLE_DEF_SIZE;
 	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2