| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-06-29 21:01:17 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-06-29 21:01:17 -0700 |
| commit | 7ede5f78a0d74b574791c7eb0e2ca6e54b80c93c (patch) | |
| tree | 25801e614cc8fdf3569d6deb8af8d9ec3a380703 | |
| parent | 31929ae00890d921618b0b449722dcdf4a4416cc (diff) | |
| parent | 5f004bcaee4cb552cf1b46a505f18f08777db7e5 (diff) | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"This cycle saw a focus on rxe and bnxt_re drivers:
- Code cleanups for irdma, rxe, rtrs, hns, vmw_pvrdma
- rxe uses workqueues instead of tasklets
- rxe has better compliance around access checks for MRs and rereg_mr
 - mana supports the 'v2' FW interface for RX coalescing
- hfi1 bug fix for stale cache entries in its MR cache
 - mlx5 bug fix to handle FW failures when destroying QPs
- erdma HW has a new doorbell allocation mechanism for uverbs that is
secure
- Lots of small cleanups and rework in bnxt_re:
- Use the common mmap functions
- Support disassociation
- Improve FW command flow
 - Support for 'low latency push'"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (71 commits)
RDMA/bnxt_re: Fix an IS_ERR() vs NULL check
RDMA/bnxt_re: Fix spelling mistake "priviledged" -> "privileged"
RDMA/bnxt_re: Remove duplicated include in bnxt_re/main.c
RDMA/bnxt_re: Refactor code around bnxt_qplib_map_rc()
RDMA/bnxt_re: Remove incorrect return check from slow path
RDMA/bnxt_re: Enable low latency push
RDMA/bnxt_re: Reorg the bar mapping
RDMA/bnxt_re: Move the interface version to chip context structure
RDMA/bnxt_re: Query function capabilities from firmware
RDMA/bnxt_re: Optimize the bnxt_re_init_hwrm_hdr usage
RDMA/bnxt_re: Add disassociate ucontext support
RDMA/bnxt_re: Use the common mmap helper functions
RDMA/bnxt_re: Initialize opcode while sending message
RDMA/cma: Remove NULL check before dev_{put, hold}
RDMA/rxe: Simplify cq->notify code
RDMA/rxe: Fixes mr access supported list
RDMA/bnxt_re: optimize the parameters passed to helper functions
RDMA/bnxt_re: remove redundant cmdq_bitmap
RDMA/bnxt_re: use firmware provided max request timeout
RDMA/bnxt_re: cancel all control path command waiters upon error
...
58 files changed, 1955 insertions, 901 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6b3f4384e46a..1ee87c3aaeab 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4805,8 +4805,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
 	event->param.ud.qkey = id_priv->qkey;
 out:
-	if (ndev)
-		dev_put(ndev);
+	dev_put(ndev);
 }
 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 2c95e6f3d47a..ea81b2497511 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -39,6 +39,7 @@
 #ifndef __BNXT_RE_H__
 #define __BNXT_RE_H__
+#include <rdma/uverbs_ioctl.h>
 #include "hw_counters.h"
 #define ROCE_DRV_MODULE_NAME "bnxt_re"
@@ -179,10 +180,14 @@ struct bnxt_re_dev {
 #define BNXT_RE_ROCEV2_IPV4_PACKET 2
 #define BNXT_RE_ROCEV2_IPV6_PACKET 3
+#define BNXT_RE_CHECK_RC(x) ((x) && ((x) != -ETIMEDOUT))
+
 static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
 {
 	if (rdev)
 		return &rdev->ibdev.dev;
 	return NULL;
 }
+
+extern const struct uapi_definition bnxt_re_uapi_defs[];
 #endif
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 952811c40c54..abef0b8baa7c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -61,6 +61,15 @@
 #include "bnxt_re.h"
 #include "ib_verbs.h"
+
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_std_types.h>
+
+#include <rdma/ib_user_ioctl_cmds.h>
+
+#define UVERBS_MODULE_NAME bnxt_re
+#include <rdma/uverbs_named_ioctl.h>
+
 #include <rdma/bnxt_re-abi.h>
 static int __from_ib_access_flags(int iflags)
@@ -534,12 +543,57 @@ fail:
 	return rc;
 }
+static struct bnxt_re_user_mmap_entry*
+bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
+			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
+{
+	struct bnxt_re_user_mmap_entry *entry;
+	int ret;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return NULL;
+
+	entry->mem_offset = mem_offset;
+	entry->mmap_flag = mmap_flag;
+	entry->uctx = uctx;
+
+	switch (mmap_flag) {
+	case BNXT_RE_MMAP_SH_PAGE:
+		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
+							&entry->rdma_entry, PAGE_SIZE, 0);
+		break;
+	case BNXT_RE_MMAP_UC_DB:
+	case BNXT_RE_MMAP_WC_DB:
+		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
+						  &entry->rdma_entry, PAGE_SIZE);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret) {
+		kfree(entry);
+		return NULL;
+	}
+	if (offset)
+		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+	return entry;
+}
+
 /* Protection Domains */
 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
+	if (udata) {
+		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
+		pd->pd_db_mmap = NULL;
+	}
+
 	bnxt_re_destroy_fence_mr(pd);
 	if (pd->qplib_pd.id) {
@@ -558,7 +612,8 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
 		udata, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
-	int rc;
+	struct bnxt_re_user_mmap_entry *entry = NULL;
+	int rc = 0;
 	pd->rdev = rdev;
 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
@@ -568,15 +623,15 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	}
 	if (udata) {
-		struct bnxt_re_pd_resp resp;
+		struct bnxt_re_pd_resp resp = {};
 		if (!ucntx->dpi.dbr) {
 			/* Allocate DPI in alloc_pd to avoid failing of
 			 * ibv_devinfo and family of application when DPIs
 			 * are depleted.
 			 */
-			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
-						 &ucntx->dpi, ucntx)) {
+			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
+						 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
 				rc = -ENOMEM;
 				goto dbfail;
 			}
@@ -585,12 +640,21 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 		resp.pdid = pd->qplib_pd.id;
 		/* Still allow mapping this DBR to the new user PD. */
 		resp.dpi = ucntx->dpi.dpi;
-		resp.dbr = (u64)ucntx->dpi.umdbr;
-		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+		entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
+						  BNXT_RE_MMAP_UC_DB, &resp.dbr);
+
+		if (!entry) {
+			rc = -ENOMEM;
+			goto dbfail;
+		}
+
+		pd->pd_db_mmap = &entry->rdma_entry;
+
+		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
 		if (rc) {
-			ibdev_err(&rdev->ibdev,
-				  "Failed to copy user response\n");
+			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
+			rc = -EFAULT;
 			goto dbfail;
 		}
 	}
@@ -614,12 +678,20 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
 {
 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
 	struct bnxt_re_dev *rdev = ah->rdev;
+	bool block = true;
+	int rc = 0;
-	bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
-			      !(flags & RDMA_DESTROY_AH_SLEEPABLE));
+	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
+	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
+	if (BNXT_RE_CHECK_RC(rc)) {
+		if (rc == -ETIMEDOUT)
+			rc = 0;
+		else
+			goto fail;
+	}
 	atomic_dec(&rdev->ah_count);
-
-	return 0;
+fail:
+	return rc;
 }
 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
@@ -3955,6 +4027,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+	struct bnxt_re_user_mmap_entry *entry;
 	struct bnxt_re_uctx_resp resp = {};
 	u32 chip_met_rev_num = 0;
 	int rc;
@@ -3993,6 +4066,16 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 	resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
 	resp.mode = rdev->chip_ctx->modes.wqe_mode;
+	if (rdev->chip_ctx->modes.db_push)
+		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
+
+	entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
+	if (!entry) {
+		rc = -ENOMEM;
+		goto cfail;
+	}
+	uctx->shpage_mmap = &entry->rdma_entry;
+
 	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
 	if (rc) {
 		ibdev_err(ibdev, "Failed to copy user context");
@@ -4016,6 +4099,8 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 	struct bnxt_re_dev *rdev = uctx->rdev;
+	rdma_user_mmap_entry_remove(uctx->shpage_mmap);
+	uctx->shpage_mmap = NULL;
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
@@ -4023,8 +4108,7 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 		/* Free DPI only if this is the first PD allocated by the
 		 * application and mark the context dpi as NULL
 		 */
-		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
-				       &rdev->qplib_res.dpi_tbl, &uctx->dpi);
+		bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
 		uctx->dpi.dbr = NULL;
 	}
 }
@@ -4035,27 +4119,177 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
 						     struct bnxt_re_ucontext, ib_uctx);
-	struct bnxt_re_dev *rdev = uctx->rdev;
+	struct bnxt_re_user_mmap_entry *bnxt_entry;
+	struct rdma_user_mmap_entry *rdma_entry;
+	int ret = 0;
 	u64 pfn;
-	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
+	if (!rdma_entry)
 		return -EINVAL;
-	if (vma->vm_pgoff) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-				       PAGE_SIZE, vma->vm_page_prot)) {
-			ibdev_err(&rdev->ibdev, "Failed to map DPI");
-			return -EAGAIN;
-		}
-	} else {
-		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
-		if (remap_pfn_range(vma, vma->vm_start,
-				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
-			ibdev_err(&rdev->ibdev, "Failed to map shared page");
-			return -EAGAIN;
+	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
+				  rdma_entry);
+
+	switch (bnxt_entry->mmap_flag) {
+	case BNXT_RE_MMAP_WC_DB:
+		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+					pgprot_writecombine(vma->vm_page_prot),
+					rdma_entry);
+		break;
+	case BNXT_RE_MMAP_UC_DB:
+		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+					pgprot_noncached(vma->vm_page_prot),
+					rdma_entry);
+		break;
+	case BNXT_RE_MMAP_SH_PAGE:
+		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	rdma_user_mmap_entry_put(rdma_entry);
+	return ret;
+}
+
+void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+	struct bnxt_re_user_mmap_entry *bnxt_entry;
+
+	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
+				  rdma_entry);
+
+	kfree(bnxt_entry);
+}
+
+static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
+{
+	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
+	enum bnxt_re_alloc_page_type alloc_type;
+	struct bnxt_re_user_mmap_entry *entry;
+	enum bnxt_re_mmap_flag mmap_flag;
+	struct bnxt_qplib_chip_ctx *cctx;
+	struct bnxt_re_ucontext *uctx;
+	struct bnxt_re_dev *rdev;
+	u64 mmap_offset;
+	u32 length;
+	u32 dpi;
+	u64 dbr;
+	int err;
+
+	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
+	if (IS_ERR(uctx))
+		return PTR_ERR(uctx);
+
+	err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
+	if (err)
+		return err;
+
+	rdev = uctx->rdev;
+	cctx = rdev->chip_ctx;
+
+	switch (alloc_type) {
+	case BNXT_RE_ALLOC_WC_PAGE:
+		if (cctx->modes.db_push) {
+			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
+						 uctx, BNXT_QPLIB_DPI_TYPE_WC))
+				return -ENOMEM;
+			length = PAGE_SIZE;
+			dpi = uctx->wcdpi.dpi;
+			dbr = (u64)uctx->wcdpi.umdbr;
+			mmap_flag = BNXT_RE_MMAP_WC_DB;
+		} else {
+			return -EINVAL;
 		}
+
+		break;
+
+	default:
+		return -EOPNOTSUPP;
 	}
+	entry = bnxt_re_mmap_entry_insert(uctx, dbr, mmap_flag, &mmap_offset);
+	if (!entry)
+		return -ENOMEM;
+
+	uobj->object = entry;
+	uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
+	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
+			     &mmap_offset, sizeof(mmap_offset));
+	if (err)
+		return err;
+
+	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
+			     &length, sizeof(length));
+	if (err)
+		return err;
+
+	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
+			     &dpi, sizeof(length));
+	if (err)
+		return err;
 	return 0;
 }
+
+static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
+				  enum rdma_remove_reason why,
+				  struct uverbs_attr_bundle *attrs)
+{
+	struct bnxt_re_user_mmap_entry *entry = uobject->object;
+	struct bnxt_re_ucontext *uctx = entry->uctx;
+
+	switch (entry->mmap_flag) {
+	case BNXT_RE_MMAP_WC_DB:
+		if (uctx && uctx->wcdpi.dbr) {
+			struct bnxt_re_dev *rdev = uctx->rdev;
+
+			bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
+			uctx->wcdpi.dbr = NULL;
+		}
+		break;
+	default:
+		goto exit;
+	}
+	rdma_user_mmap_entry_remove(&entry->rdma_entry);
+exit:
+	return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
+			    UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
+					    BNXT_RE_OBJECT_ALLOC_PAGE,
+					    UVERBS_ACCESS_NEW,
+					    UA_MANDATORY),
+			    UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
+						 enum bnxt_re_alloc_page_type,
+						 UA_MANDATORY),
+			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
+						UVERBS_ATTR_TYPE(u64),
+						UA_MANDATORY),
+			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
						UVERBS_ATTR_TYPE(u32),
+						UA_MANDATORY),
+			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
+						UVERBS_ATTR_TYPE(u32),
+						UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
+				    UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
+						    BNXT_RE_OBJECT_ALLOC_PAGE,
+						    UVERBS_ACCESS_DESTROY,
+						    UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
+			    UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
+			    &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
+			    &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
+
+const struct uapi_definition bnxt_re_uapi_defs[] = {
+	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
+	{}
+};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 31f7e34040f7..32d9e9d09791 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -60,6 +60,8 @@ struct bnxt_re_pd {
 	struct bnxt_re_dev *rdev;
 	struct bnxt_qplib_pd qplib_pd;
 	struct bnxt_re_fence_data fence;
+	struct rdma_user_mmap_entry *pd_db_mmap;
+	struct rdma_user_mmap_entry *pd_wcdb_mmap;
 };
 struct bnxt_re_ah {
@@ -134,8 +136,23 @@ struct bnxt_re_ucontext {
 	struct ib_ucontext ib_uctx;
 	struct bnxt_re_dev *rdev;
 	struct bnxt_qplib_dpi dpi;
+	struct bnxt_qplib_dpi wcdpi;
 	void *shpg;
 	spinlock_t sh_lock; /* protect shpg */
+	struct rdma_user_mmap_entry *shpage_mmap;
+};
+
+enum bnxt_re_mmap_flag {
+	BNXT_RE_MMAP_SH_PAGE,
+	BNXT_RE_MMAP_UC_DB,
+	BNXT_RE_MMAP_WC_DB,
+};
+
+struct bnxt_re_user_mmap_entry {
+	struct rdma_user_mmap_entry rdma_entry;
+	struct bnxt_re_ucontext *uctx;
+	u64 mem_offset;
+	u8 mmap_flag;
 };
 static inline u16 bnxt_re_get_swqe_size(int nsge)
@@ -213,6 +230,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
 void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+
 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 3073398a2183..b42166fe7454 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -83,6 +83,45 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
 				unsigned long event, void *ptr);
 static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
+static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
+
+static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
+			     u32 *offset);
+static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_chip_ctx *cctx;
+	struct bnxt_en_dev *en_dev;
+	struct bnxt_qplib_res *res;
+	u32 l2db_len = 0;
+	u32 offset = 0;
+	u32 barlen;
+	int rc;
+
+	res = &rdev->qplib_res;
+	en_dev = rdev->en_dev;
+	cctx = rdev->chip_ctx;
+
+	/* Issue qcfg */
+	rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset);
+	if (rc)
+		dev_info(rdev_to_dev(rdev),
+			 "Couldn't get DB bar size, Low latency framework is disabled\n");
+	/* set register offsets for both UC and WC */
+	res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
+						 BNXT_QPLIB_DBR_PF_DB_OFFSET;
+	res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
+
+	/* If WC mapping is disabled by L2 driver then en_dev->l2_db_size
+	 * is equal to the DB-Bar actual size. This indicates that L2
+	 * is mapping entire bar as UC-. RoCE driver can't enable WC mapping
+	 * in such cases and DB-push will be disabled.
+	 */
+	barlen = pci_resource_len(res->pdev, RCFW_DBR_PCI_BAR_REGION);
+	if (cctx->modes.db_push && l2db_len && en_dev->l2_db_size != barlen) {
+		res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
+		dev_info(rdev_to_dev(rdev), "Low latency framework is enabled\n");
+	}
+}
 static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
 {
@@ -91,6 +130,9 @@ static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
 	cctx = rdev->chip_ctx;
 	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
 			       mode : BNXT_QPLIB_WQE_MODE_STATIC;
+	if (bnxt_re_hwrm_qcaps(rdev))
+		dev_err(rdev_to_dev(rdev),
+			"Failed to query hwrm qcaps\n");
 }
 static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
@@ -112,6 +154,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
 {
 	struct bnxt_qplib_chip_ctx *chip_ctx;
 	struct bnxt_en_dev *en_dev;
+	int rc;
 	en_dev = rdev->en_dev;
@@ -130,6 +173,12 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
 	rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
 	bnxt_re_set_drv_mode(rdev, wqe_mode);
+
+	bnxt_re_set_db_offset(rdev);
+	rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
