author    Shiraz Saleem <shiraz.saleem@intel.com>    2022-02-17 09:18:49 -0600
committer Jason Gunthorpe <jgg@nvidia.com>           2022-02-23 15:24:18 -0400
commit    2c4b14ea9507106c0599349fbb8efdeb3b7aa840 (patch)
tree      c3c7a7a79d2101fad7bf94e9725d454d652c2e39 /drivers/infiniband/hw/irdma/ctrl.c
parent    4eaa29b45e09d8565bd8bf596750d9b90ad5c806 (diff)
RDMA/irdma: Remove enum irdma_status_code
Replace use of the custom irdma_status_code with standard Linux error codes. Remove enum irdma_status_code and the header in which it is defined.

Link: https://lore.kernel.org/r/20220217151851.1518-2-shiraz.saleem@intel.com
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
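The conversion follows a consistent mapping from the removed driver-private codes to standard errno values. The stand-alone user-space sketch below is an editorial aid, not part of the patch; it summarizes the mappings that appear in the hunks that follow, and the struct/table names are made up for the example.

/*
 * Illustrative sketch only: summarizes how the removed
 * enum irdma_status_code values map onto the errno codes
 * returned after this change, as seen in the hunks below.
 */
#include <errno.h>
#include <stdio.h>

struct code_map {
	const char *old_code;	/* removed irdma_status_code value */
	int new_code;		/* standard Linux error code now returned */
};

static const struct code_map irdma_err_map[] = {
	{ "IRDMA_ERR_RING_FULL",               -ENOMEM },
	{ "IRDMA_ERR_NO_MEMORY",               -ENOMEM },
	{ "IRDMA_ERR_QP_TOOMANY_WRS_POSTED",   -ENOMEM },
	{ "IRDMA_ERR_BUF_TOO_SHORT",           -ENOMEM },
	{ "IRDMA_ERR_REG_CQ_FULL",             -ENOMEM },
	{ "IRDMA_ERR_PARAM",                   -EINVAL },
	{ "IRDMA_ERR_INVALID_SIZE",            -EINVAL },
	{ "IRDMA_ERR_INVALID_FRAG_COUNT",      -EINVAL },
	{ "IRDMA_ERR_INVALID_PBLE_INDEX",      -EINVAL },
	{ "IRDMA_ERR_INVALID_QP_ID",           -EINVAL },
	{ "IRDMA_ERR_INVALID_CQ_ID",           -EINVAL },
	{ "IRDMA_ERR_INVALID_CEQ_ID",          -EINVAL },
	{ "IRDMA_ERR_INVALID_PUSH_PAGE_INDEX", -EINVAL },
	{ "IRDMA_ERR_CQP_COMPL_ERROR",         -EIO },
	{ "IRDMA_ERR_TIMEOUT",                 -ETIMEDOUT },
	{ "IRDMA_ERR_FLUSHED_Q",               -EALREADY },
	{ "IRDMA_ERR_Q_EMPTY",                 -ENOENT },
	{ "IRDMA_ERR_DEVICE_NOT_SUPPORTED",    -EOPNOTSUPP },
};

int main(void)
{
	size_t i;

	/* Print each old symbolic code with its new numeric errno value. */
	for (i = 0; i < sizeof(irdma_err_map) / sizeof(irdma_err_map[0]); i++)
		printf("%-36s -> %d\n", irdma_err_map[i].old_code,
		       irdma_err_map[i].new_code);
	return 0;
}

In short: ring-full and allocation-type failures become -ENOMEM, parameter and range checks become -EINVAL, and CQP completion or polling failures become -EIO or -ETIMEDOUT.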
Diffstat (limited to 'drivers/infiniband/hw/irdma/ctrl.c')
-rw-r--r--   drivers/infiniband/hw/irdma/ctrl.c   553
1 file changed, 255 insertions(+), 298 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 94a9c26ac83f..01cf75e9fd48 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -3,7 +3,6 @@
#include <linux/etherdevice.h>
#include "osdep.h"
-#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
@@ -180,17 +179,16 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
- struct irdma_add_arp_cache_entry_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_add_arp_cache_entry_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8, info->reach_max);
set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
@@ -218,16 +216,15 @@ irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
* @arp_index: arp index to delete arp entry
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
- u16 arp_index, bool post_sq)
+static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 arp_index, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
hdr = arp_index |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
@@ -252,17 +249,16 @@ irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
- struct irdma_apbvt_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_apbvt_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, info->port);
@@ -300,7 +296,7 @@ irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
* quad hash entry in the hardware will point to iwarp's qp
* number and requires no calls from the driver.
*/
-static enum irdma_status_code
+static int
irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
struct irdma_qhash_table_info *info,
u64 scratch, bool post_sq)
@@ -313,7 +309,7 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
@@ -376,10 +372,9 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
* @qp: sc qp
* @info: initialization qp info
*/
-enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
- struct irdma_qp_init_info *info)
+int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
{
- enum irdma_status_code ret_code;
+ int ret_code;
u32 pble_obj_cnt;
u16 wqe_size;
@@ -387,7 +382,7 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
info->qp_uk_init_info.max_rq_frag_cnt >
info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
qp->dev = info->pd->dev;
qp->vsi = info->vsi;
@@ -410,7 +405,7 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
(info->virtual_map && info->rq_pa >= pble_obj_cnt))
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
qp->llp_stream_handle = (void *)(-1);
qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
@@ -450,8 +445,8 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
- u64 scratch, bool post_sq)
+int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
+ u64 scratch, bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -460,11 +455,11 @@ enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_c
cqp = qp->dev->cqp;
if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
- return IRDMA_ERR_INVALID_QP_ID;
+ return -EINVAL;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
set_64bit_val(wqe, 40, qp->shadow_area_pa);
@@ -501,9 +496,8 @@ enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_c
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
- struct irdma_modify_qp_info *info,
- u64 scratch, bool post_sq)
+int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -514,7 +508,7 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
cqp = qp->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
if (info->dont_send_fin)
@@ -572,9 +566,8 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
* @ignore_mw_bnd: memory window bind flag
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
- bool remove_hash_idx, bool ignore_mw_bnd,
- bool post_sq)
+int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+ bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -583,7 +576,7 @@ enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
cqp = qp->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
set_64bit_val(wqe, 40, qp->shadow_area_pa);
@@ -765,16 +758,15 @@ void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
- bool post_sq)
+static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
@@ -800,17 +792,16 @@ irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
- struct irdma_local_mac_entry_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_local_mac_entry_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 header;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
@@ -839,16 +830,16 @@ irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
* @ignore_ref_count: to force mac adde delete
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
- u16 entry_idx, u8 ignore_ref_count, bool post_sq)
+static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 entry_idx, u8 ignore_ref_count,
+ bool post_sq)
{
__le64 *wqe;
u64 header;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE,
IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
@@ -1061,10 +1052,9 @@ void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
- struct irdma_allocate_stag_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_allocate_stag_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -1081,7 +1071,7 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
@@ -1123,10 +1113,9 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
- struct irdma_reg_ns_stag_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
+ struct irdma_reg_ns_stag_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 fbo;
@@ -1144,7 +1133,7 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
else if (info->page_size == 0x1000)
page_size = IRDMA_PAGE_SIZE_4K;
else
- return IRDMA_ERR_PARAM;
+ return -EINVAL;
if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
@@ -1154,12 +1143,12 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
fbo = info->va & (info->page_size - 1);
set_64bit_val(wqe, 0,
@@ -1212,10 +1201,9 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
- struct irdma_dealloc_stag_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_dealloc_stag_info *info,
+ u64 scratch, bool post_sq)
{
u64 hdr;
__le64 *wqe;
@@ -1224,7 +1212,7 @@ irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
@@ -1253,9 +1241,9 @@ irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
+ struct irdma_mw_alloc_info *info, u64 scratch,
+ bool post_sq)
{
u64 hdr;
struct irdma_sc_cqp *cqp;
@@ -1264,7 +1252,7 @@ irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
@@ -1294,9 +1282,9 @@ irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
* @info: fast mr info
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code
-irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
- struct irdma_fast_reg_stag_info *info, bool post_sq)
+int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info,
+ bool post_sq)
{
u64 temp, hdr;
__le64 *wqe;
@@ -1318,7 +1306,7 @@ irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(&qp->qp_uk, wqe_idx);
@@ -1847,8 +1835,7 @@ void irdma_terminate_received(struct irdma_sc_qp *qp,
}
}
-static enum irdma_status_code irdma_null_ws_add(struct irdma_sc_vsi *vsi,
- u8 user_pri)
+static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
{
return 0;
}
@@ -1933,8 +1920,8 @@ static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi)
* @vsi: pointer to the vsi structure
* @info: The info structure used for initialization
*/
-enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
- struct irdma_vsi_stats_info *info)
+int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info)
{
u8 fcn_id = info->fcn_id;
struct irdma_dma_mem *stats_buff_mem;
@@ -1949,7 +1936,7 @@ enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
&stats_buff_mem->pa,
GFP_KERNEL);
if (!stats_buff_mem->va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
vsi->pestat->gather_info.last_gather_stats_va =
@@ -1976,7 +1963,7 @@ stats_error:
stats_buff_mem->va, stats_buff_mem->pa);
stats_buff_mem->va = NULL;
- return IRDMA_ERR_CQP_COMPL_ERROR;
+ return -EIO;
}
/**
@@ -2038,19 +2025,19 @@ u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
* @info: gather stats info structure
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
- struct irdma_stats_gather_info *info, u64 scratch)
+static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_gather_info *info,
+ u64 scratch)
{
__le64 *wqe;
u64 temp;
if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
- return IRDMA_ERR_BUF_TOO_SHORT;
+ return -ENOMEM;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
@@ -2085,17 +2072,16 @@ irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
* @alloc: alloc vs. delete flag
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
- struct irdma_stats_inst_info *info, bool alloc,
- u64 scratch)
+static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_inst_info *info,
+ bool alloc, u64 scratch)
{
__le64 *wqe;
u64 temp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
@@ -2123,9 +2109,8 @@ irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
* @info: User priority map info
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
- struct irdma_up_info *info,
- u64 scratch)
+static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
+ struct irdma_up_info *info, u64 scratch)
{
__le64 *wqe;
u64 temp = 0;
@@ -2133,7 +2118,7 @@ static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
temp |= (u64)info->map[i] << (i * 8);
@@ -2166,17 +2151,16 @@ static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
* @node_op: 0 for add 1 for modify, 2 for delete
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
- struct irdma_ws_node_info *info,
- enum irdma_ws_node_op node_op, u64 scratch)
+static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
+ struct irdma_ws_node_info *info,
+ enum irdma_ws_node_op node_op, u64 scratch)
{
__le64 *wqe;
u64 temp = 0;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 32,
FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
@@ -2209,9 +2193,9 @@ irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
- struct irdma_qp_flush_info *info,
- u64 scratch, bool post_sq)
+int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, u64 scratch,
+ bool post_sq)
{
u64 temp = 0;
__le64 *wqe;
@@ -2230,13 +2214,13 @@ enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
ibdev_dbg(to_ibdev(qp->dev),
"CQP: Additional flush request ignored for qp %x\n",
qp->qp_uk.qp_id);
- return IRDMA_ERR_FLUSHED_Q;
+ return -EALREADY;
}
cqp = qp->pd->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
if (info->userflushcode) {
if (flush_rq)
@@ -2283,9 +2267,9 @@ enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
- struct irdma_gen_ae_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_gen_ae(struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, u64 scratch,
+ bool post_sq)
{
u64 temp;
__le64 *wqe;
@@ -2295,7 +2279,7 @@ static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
cqp = qp->pd->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
info->ae_src);
@@ -2323,10 +2307,9 @@ static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
- struct irdma_upload_context_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
+ struct irdma_upload_context_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -2335,7 +2318,7 @@ irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, info->buf_pa);
@@ -2364,21 +2347,20 @@ irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
- struct irdma_cqp_manage_push_page_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_manage_push_page_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
if (info->free_page &&
info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
- return IRDMA_ERR_INVALID_PUSH_PAGE_INDEX;
+ return -EINVAL;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, info->qs_handle);
hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
@@ -2404,16 +2386,15 @@ irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
* @qp: sc qp struct
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp,
- struct irdma_sc_qp *qp,
- u64 scratch)
+static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
{
u64 hdr;
__le64 *wqe;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
@@ -2435,16 +2416,15 @@ static enum irdma_status_code irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp,
* @qp: sc qp struct
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code irdma_sc_resume_qp(struct irdma_sc_cqp *cqp,
- struct irdma_sc_qp *qp,
- u64 scratch)
+static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
{
u64 hdr;
__le64 *wqe;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
@@ -2477,14 +2457,13 @@ static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
* @cq: cq struct
* @info: cq initialization info
*/
-enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
- struct irdma_cq_init_info *info)
+int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
{
u32 pble_obj_cnt;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cq->cq_pa = info->cq_base_pa;
cq->dev = info->dev;
@@ -2515,23 +2494,21 @@ enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
* @check_overflow: flag for overflow check
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
- u64 scratch,
- bool check_overflow,
- bool post_sq)
+static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
+ bool check_overflow, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
u64 hdr;
struct irdma_sc_ceq *ceq;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
cqp = cq->dev->cqp;
if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1))
- return IRDMA_ERR_INVALID_CQ_ID;
+ return -EINVAL;
if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1))
- return IRDMA_ERR_INVALID_CEQ_ID;
+ return -EINVAL;
ceq = cq->dev->ceq[cq->ceq_id];
if (ceq && ceq->reg_cq)
@@ -2544,7 +2521,7 @@ static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
if (!wqe) {
if (ceq && ceq->reg_cq)
irdma_sc_remove_cq_ctx(ceq, cq);
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
}
set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
@@ -2590,8 +2567,7 @@ static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
- bool post_sq)
+int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -2601,7 +2577,7 @@ enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
cqp = cq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
ceq = cq->dev->ceq[cq->ceq_id];
if (ceq && ceq->reg_cq)
@@ -2657,9 +2633,9 @@ void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *inf
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag to post to sq
*/
-static enum irdma_status_code
-irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
+ struct irdma_modify_cq_info *info, u64 scratch,
+ bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -2669,12 +2645,12 @@ irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info,
pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->cq_resize && info->virtual_map &&
info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cqp = cq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, info->cq_size);
set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
@@ -2748,8 +2724,8 @@ static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
* @tail: wqtail register value
* @count: how many times to try for completion
*/
-static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
- u32 tail, u32 count)
+static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
+ u32 count)
{
u32 i = 0;
u32 newtail, error, val;
@@ -2761,7 +2737,7 @@ static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
ibdev_dbg(to_ibdev(cqp->dev),
"CQP: CQPERRCODES error_code[x%08X]\n",
error);
- return IRDMA_ERR_CQP_COMPL_ERROR;
+ return -EIO;
}
if (newtail != tail) {
/* SUCCESS */
@@ -2772,7 +2748,7 @@ static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
udelay(cqp->dev->hw_attrs.max_sleep_count);
}
- return IRDMA_ERR_TIMEOUT;
+ return -ETIMEDOUT;
}
/**
@@ -2927,10 +2903,9 @@ static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
* parses fpm query buffer and copy max_cnt and
* size value of hmc objects in hmc_info
*/
-static enum irdma_status_code
-irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
- struct irdma_hmc_info *hmc_info,
- struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
{
struct irdma_hmc_obj_info *obj_info;
u64 temp;
@@ -2969,7 +2944,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
obj_info[IRDMA_HMC_IW_XFFL].size = 4;
hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
if (!hmc_fpm_misc->xf_block_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
get_64bit_val(buf, 80, &temp);
@@ -2978,7 +2953,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
if (!hmc_fpm_misc->q1_block_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
@@ -3002,7 +2977,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
if (!hmc_fpm_misc->rrf_block_size &&
obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
@@ -3014,7 +2989,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
if (!hmc_fpm_misc->ooiscf_block_size &&
obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
return 0;
}
@@ -3042,8 +3017,7 @@ static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
* @ceq: ceq sc structure
* @cq: cq sc structure
*/
-enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
- struct irdma_sc_cq *cq)
+int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
{
unsigned long flags;
@@ -3051,7 +3025,7 @@ enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
if (ceq->reg_cq_size == ceq->elem_cnt) {
spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
- return IRDMA_ERR_REG_CQ_FULL;
+ return -ENOMEM;
}
ceq->reg_cq[ceq->reg_cq_size++] = cq;
@@ -3092,15 +3066,15 @@ exit:
*
* Initializes the object and context buffers for a control Queue Pair.
*/
-enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
- struct irdma_cqp_init_info *info)
+int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info)
{
u8 hw_sq_size;
if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
((info->sq_size & (info->sq_size - 1))))
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
IRDMA_QUEUE_TYPE_CQP);
@@ -3150,13 +3124,12 @@ enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
* @maj_err: If error, major err number
* @min_err: If error, minor err number
*/
-enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
- u16 *min_err)
+int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
{
u64 temp;
u8 hw_rev;
u32 cnt = 0, p1, p2, val = 0, err_code;
- enum irdma_status_code ret_code;
+ int ret_code;
hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
@@ -3165,7 +3138,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
cqp->sdbuf.size, &cqp->sdbuf.pa,
GFP_KERNEL);
if (!cqp->sdbuf.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
spin_lock_init(&cqp->dev->cqp_lock);
@@ -3220,7 +3193,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
do {
if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
- ret_code = IRDMA_ERR_TIMEOUT;
+ ret_code = -ETIMEDOUT;
goto err;
}
udelay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3228,7 +3201,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
} while (!val);
if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
- ret_code = IRDMA_ERR_DEVICE_NOT_SUPPORTED;
+ ret_code = -EOPNOTSUPP;
goto err;
}
@@ -3269,7 +3242,7 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
u32 *wqe_idx)
{
__le64 *wqe = NULL;
- enum irdma_status_code ret_code;
+ int ret_code;
if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
ibdev_dbg(to_ibdev(cqp->dev),
@@ -3296,16 +3269,16 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
* irdma_sc_cqp_destroy - destroy cqp during close
* @cqp: struct for cqp hw
*/
-enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
+int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
{
u32 cnt = 0, val;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
do {
if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
- ret_code = IRDMA_ERR_TIMEOUT;
+ ret_code = -ETIMEDOUT;
break;
}
udelay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3350,8 +3323,8 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
* @ccq: ccq sc struct
* @info: completion q entry to return
*/
-enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
- struct irdma_ccq_cqe_info *info)
+int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info)
{
u64 qp_ctx, temp, temp1;
__le64 *cqe;
@@ -3359,7 +3332,7 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
u32 wqe_idx;
u32 error;
u8 polarity;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
if (ccq->cq_uk.avoid_mem_cflct)
cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
@@ -3369,7 +3342,7 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
get_64bit_val(cqe, 24, &temp);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
if (polarity != ccq->cq_uk.polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
get_64bit_val(cqe, 8, &qp_ctx);
cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
@@ -3416,25 +3389,25 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
* @op_code: cqp opcode for completion
* @compl_info: completion q entry to return
*/
-enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
-