author    Linus Torvalds <torvalds@linux-foundation.org>  2024-11-22 20:03:57 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-11-22 20:03:57 -0800
commit    2a163a4cea153348172e260a0c5b5569103a66a3
tree      8cbb176b7850336330f42415085b4d28d8583f21
parent    ceba6f6f33f29ab838b23a567621b847e527d085
parent    68b3bca2df00f0a63f0aa2db2b2adc795665229e
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Several fixes scattered across the drivers and a few new features:

  - Minor updates and bug fixes to hfi1, efa, ipoib, bnxt, hns

  - Force disassociate the userspace FD when hns does an async reset

  - bnxt new features for optimized modify QP to skip certain states,
    CQ coalescing, better debug dumping

  - mlx5 new data placement ordering feature

  - Faster destruction of mlx5 devx HW objects

  - Improvements to RDMA CM MAD handling"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (51 commits)
  RDMA/bnxt_re: Correct the sequence of device suspend
  RDMA/bnxt_re: Use the default mode of congestion control
  RDMA/bnxt_re: Support different traffic class
  IB/cm: Rework sending DREQ when destroying a cm_id
  IB/cm: Do not hold reference on cm_id unless needed
  IB/cm: Explicitly mark if a response MAD is a retransmission
  RDMA/mlx5: Move events notifier registration to be after device registration
  RDMA/bnxt_re: Cache MSIx info to a local structure
  RDMA/bnxt_re: Refurbish CQ to NQ hash calculation
  RDMA/bnxt_re: Refactor NQ allocation
  RDMA/bnxt_re: Fail probe early when not enough MSI-x vectors are reserved
  RDMA/hns: Fix different dgids mapping to the same dip_idx
  RDMA/bnxt_re: Add set_func_resources support for P5/P7 adapters
  RDMA/bnxt_re: Enhance RoCE SRIOV resource configuration design
  bnxt_en: Add support for RoCE sriov configuration
  RDMA/hns: Fix NULL pointer dereference in hns_roce_map_mr_sg()
  RDMA/hns: Fix out-of-order issue of requester when setting FENCE
  RDMA/nldev: Add IB device and net device rename events
  RDMA/mlx5: Add implementation for ufile_hw_cleanup device operation
  RDMA/core: Move ib_uverbs_file struct to uverbs_types.h
  ...
-rw-r--r-- drivers/infiniband/core/cm.c | 170
-rw-r--r-- drivers/infiniband/core/device.c | 39
-rw-r--r-- drivers/infiniband/core/nldev.c | 40
-rw-r--r-- drivers/infiniband/core/rdma_core.c | 12
-rw-r--r-- drivers/infiniband/core/roce_gid_mgmt.c | 30
-rw-r--r-- drivers/infiniband/core/uverbs.h | 29
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 43
-rw-r--r-- drivers/infiniband/hw/bnxt_re/Makefile | 3
-rw-r--r-- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 47
-rw-r--r-- drivers/infiniband/hw/bnxt_re/debugfs.c | 138
-rw-r--r-- drivers/infiniband/hw/bnxt_re/debugfs.h | 21
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 130
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 453
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 73
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.h | 23
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 19
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 2
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.h | 13
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.c | 35
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.h | 2
-rw-r--r-- drivers/infiniband/hw/bnxt_re/roce_hsi.h | 57
-rw-r--r-- drivers/infiniband/hw/efa/efa_admin_cmds_defs.h | 63
-rw-r--r-- drivers/infiniband/hw/efa/efa_admin_defs.h | 4
-rw-r--r-- drivers/infiniband/hw/efa/efa_com_cmd.c | 6
-rw-r--r-- drivers/infiniband/hw/efa/efa_com_cmd.h | 4
-rw-r--r-- drivers/infiniband/hw/efa/efa_io_defs.h | 106
-rw-r--r-- drivers/infiniband/hw/efa/efa_verbs.c | 51
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.c | 2
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.h | 1
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_cq.c | 4
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_debugfs.c | 3
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_device.h | 14
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hem.c | 48
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 257
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 8
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_main.c | 7
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_mr.c | 11
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_qp.c | 77
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_srq.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx5/devx.c | 93
-rw-r--r-- drivers/infiniband/hw/mlx5/devx.h | 4
-rw-r--r-- drivers/infiniband/hw/mlx5/mad.c | 8
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 78
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 3
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 51
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_qp.c | 1
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_req.c | 6
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 9
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 3
-rw-r--r-- drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 53
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 12
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 23
-rw-r--r-- include/rdma/ib_verbs.h | 17
-rw-r--r-- include/rdma/uverbs_types.h | 33
-rw-r--r-- include/uapi/rdma/efa-abi.h | 3
-rw-r--r-- include/uapi/rdma/mlx5-abi.h | 5
-rw-r--r-- include/uapi/rdma/rdma_netlink.h | 2
63 files changed, 1977 insertions(+), 499 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 07fb8d3c037f..142170473e75 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -35,6 +35,8 @@ MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
+#define CM_DIRECT_RETRY_CTX ((void *) 1UL)
+
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
[IB_CM_REJ_NO_EEC] = "no EEC",
@@ -93,8 +95,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param);
-static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
- const void *private_data, u8 private_data_len);
+static void cm_issue_dreq(struct cm_id_private *cm_id_priv);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
void *private_data, u8 private_data_len);
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
@@ -307,12 +308,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
goto out;
}
- /* Timeout set by caller if response is expected. */
m->ah = ah;
- m->retries = cm_id_priv->max_cm_retries;
-
- refcount_inc(&cm_id_priv->refcount);
- m->context[0] = cm_id_priv;
out:
spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
@@ -321,16 +317,13 @@ out:
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
- struct cm_id_private *cm_id_priv = msg->context[0];
-
if (msg->ah)
rdma_destroy_ah(msg->ah, 0);
- cm_deref_id(cm_id_priv);
ib_free_send_mad(msg);
}
static struct ib_mad_send_buf *
-cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
+cm_alloc_priv_msg(struct cm_id_private *cm_id_priv, enum ib_cm_state state)
{
struct ib_mad_send_buf *msg;
@@ -339,7 +332,15 @@ cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
msg = cm_alloc_msg(cm_id_priv);
if (IS_ERR(msg))
return msg;
+
cm_id_priv->msg = msg;
+ refcount_inc(&cm_id_priv->refcount);
+ msg->context[0] = cm_id_priv;
+ msg->context[1] = (void *) (unsigned long) state;
+
+ msg->retries = cm_id_priv->max_cm_retries;
+ msg->timeout_ms = cm_id_priv->timeout_ms;
+
return msg;
}
@@ -358,13 +359,20 @@ static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
ib_free_send_mad(msg);
}
-static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc)
+static struct ib_mad_send_buf *
+cm_alloc_response_msg_no_ah(struct cm_port *port,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ bool direct_retry)
{
- return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
- 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
- GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
+ struct ib_mad_send_buf *m;
+
+ m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
+ 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+ GFP_ATOMIC, IB_MGMT_BASE_VERSION);
+ if (!IS_ERR(m))
+ m->context[0] = direct_retry ? CM_DIRECT_RETRY_CTX : NULL;
+
+ return m;
}
static int cm_create_response_msg_ah(struct cm_port *port,
@@ -384,12 +392,13 @@ static int cm_create_response_msg_ah(struct cm_port *port,
static int cm_alloc_response_msg(struct cm_port *port,
struct ib_mad_recv_wc *mad_recv_wc,
+ bool direct_retry,
struct ib_mad_send_buf **msg)
{
struct ib_mad_send_buf *m;
int ret;
- m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
+ m = cm_alloc_response_msg_no_ah(port, mad_recv_wc, direct_retry);
if (IS_ERR(m))
return PTR_ERR(m);
@@ -403,13 +412,6 @@ static int cm_alloc_response_msg(struct cm_port *port,
return 0;
}
-static void cm_free_response_msg(struct ib_mad_send_buf *msg)
-{
- if (msg->ah)
- rdma_destroy_ah(msg->ah, 0);
- ib_free_send_mad(msg);
-}
-
static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
{
void *data;
@@ -1109,7 +1111,8 @@ retest:
cm_id->state = IB_CM_IDLE;
break;
}
- cm_send_dreq_locked(cm_id_priv, NULL, 0);
+ cm_issue_dreq(cm_id_priv);
+ cm_enter_timewait(cm_id_priv);
goto retest;
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->msg);
@@ -1557,7 +1560,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
if (param->alternate_path)
cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
- msg = cm_alloc_priv_msg(cm_id_priv);
+ msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REQ_SENT);
if (IS_ERR(msg)) {
ret = PTR_ERR(msg);
goto out_unlock;
@@ -1566,8 +1569,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
req_msg = (struct cm_req_msg *)msg->mad;
cm_format_req(req_msg, cm_id_priv, param);
cm_id_priv->tid = req_msg->hdr.tid;
- msg->timeout_ms = cm_id_priv->timeout_ms;
- msg->context[1] = (void *)(unsigned long)IB_CM_REQ_SENT;
cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
@@ -1598,7 +1599,7 @@ static int cm_issue_rej(struct cm_port *port,
struct cm_rej_msg *rej_msg, *rcv_msg;
int ret;
- ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
+ ret = cm_alloc_response_msg(port, mad_recv_wc, false, &msg);
if (ret)
return ret;
@@ -1624,7 +1625,7 @@ static int cm_issue_rej(struct cm_port *port,
IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
ret = ib_post_send_mad(msg, NULL);
if (ret)
- cm_free_response_msg(msg);
+ cm_free_msg(msg);
return ret;
}
@@ -1951,7 +1952,7 @@ static void cm_dup_req_handler(struct cm_work *work,
}
spin_unlock_irq(&cm_id_priv->lock);
- ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
+ ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg);
if (ret)
return;
@@ -1980,7 +1981,7 @@ static void cm_dup_req_handler(struct cm_work *work,
return;
unlock: spin_unlock_irq(&cm_id_priv->lock);
-free: cm_free_response_msg(msg);
+free: cm_free_msg(msg);
}
static struct cm_id_private *cm_match_req(struct cm_work *work,
@@ -2294,7 +2295,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
goto out;
}
- msg = cm_alloc_priv_msg(cm_id_priv);
+ msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REP_SENT);
if (IS_ERR(msg)) {
ret = PTR_ERR(msg);
goto out;
@@ -2302,8 +2303,6 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
rep_msg = (struct cm_rep_msg *) msg->mad;
cm_format_rep(rep_msg, cm_id_priv, param);
- msg->timeout_ms = cm_id_priv->timeout_ms;
- msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
trace_icm_send_rep(cm_id);
ret = ib_post_send_mad(msg, NULL);
@@ -2444,7 +2443,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
atomic_long_inc(
&work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
- ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
+ ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg);
if (ret)
goto deref;
@@ -2469,7 +2468,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
goto deref;
unlock: spin_unlock_irq(&cm_id_priv->lock);
-free: cm_free_response_msg(msg);
+free: cm_free_msg(msg);
deref: cm_deref_id(cm_id_priv);
}
@@ -2653,59 +2652,68 @@ static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
private_data_len);
}
-static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
- const void *private_data, u8 private_data_len)
+static void cm_issue_dreq(struct cm_id_private *cm_id_priv)
{
struct ib_mad_send_buf *msg;
int ret;
lockdep_assert_held(&cm_id_priv->lock);
+ msg = cm_alloc_msg(cm_id_priv);
+ if (IS_ERR(msg))
+ return;
+
+ cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, NULL, 0);
+
+ trace_icm_send_dreq(&cm_id_priv->id);
+ ret = ib_post_send_mad(msg, NULL);
+ if (ret)
+ cm_free_msg(msg);
+}
+
+int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
+ u8 private_data_len)
+{
+ struct cm_id_private *cm_id_priv =
+ container_of(cm_id, struct cm_id_private, id);
+ struct ib_mad_send_buf *msg;
+ unsigned long flags;
+ int ret;
+
if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
return -EINVAL;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
trace_icm_dreq_skipped(&cm_id_priv->id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->msg);
- msg = cm_alloc_priv_msg(cm_id_priv);
+ msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_DREQ_SENT);
if (IS_ERR(msg)) {
cm_enter_timewait(cm_id_priv);
- return PTR_ERR(msg);
+ ret = PTR_ERR(msg);
+ goto unlock;
}
cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
- msg->timeout_ms = cm_id_priv->timeout_ms;
- msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
trace_icm_send_dreq(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_enter_timewait(cm_id_priv);
cm_free_priv_msg(msg);
- return ret;
+ goto unlock;
}
cm_id_priv->id.state = IB_CM_DREQ_SENT;
- return 0;
-}
-
-int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
- u8 private_data_len)
-{
- struct cm_id_private *cm_id_priv =
- container_of(cm_id, struct cm_id_private, id);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
+unlock:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
@@ -2791,7 +2799,7 @@ static int cm_issue_drep(struct cm_port *port,
struct cm_drep_msg *drep_msg;
int ret;
- ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
+ ret = cm_alloc_response_msg(port, mad_recv_wc, true, &msg);
if (ret)
return ret;
@@ -2809,7 +2817,7 @@ static int cm_issue_drep(struct cm_port *port,
IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
ret = ib_post_send_mad(msg, NULL);
if (ret)
- cm_free_response_msg(msg);
+ cm_free_msg(msg);
return ret;
}
@@ -2856,7 +2864,8 @@ static int cm_dreq_handler(struct cm_work *work)
case IB_CM_TIMEWAIT:
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
[CM_DREQ_COUNTER]);
- msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
+ msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc,
+ true);
if (IS_ERR(msg))
goto unlock;
@@ -2867,7 +2876,7 @@ static int cm_dreq_handler(struct cm_work *work)
if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
ib_post_send_mad(msg, NULL))
- cm_free_response_msg(msg);
+ cm_free_msg(msg);
goto deref;
case IB_CM_DREQ_RCVD:
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
@@ -3361,7 +3370,8 @@ static int cm_lap_handler(struct cm_work *work)
case IB_CM_MRA_LAP_SENT:
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
[CM_LAP_COUNTER]);
- msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
+ msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc,
+ true);
if (IS_ERR(msg))
goto unlock;
@@ -3374,7 +3384,7 @@ static int cm_lap_handler(struct cm_work *work)
if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
ib_post_send_mad(msg, NULL))
- cm_free_response_msg(msg);
+ cm_free_msg(msg);
goto deref;
case IB_CM_LAP_RCVD:
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
@@ -3513,7 +3523,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
goto out_unlock;
}
- msg = cm_alloc_priv_msg(cm_id_priv);
+ msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_SIDR_REQ_SENT);
if (IS_ERR(msg)) {
ret = PTR_ERR(msg);
goto out_unlock;
@@ -3521,8 +3531,6 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
param);
- msg->timeout_ms = cm_id_priv->timeout_ms;
- msg->context[1] = (void *)(unsigned long)IB_CM_SIDR_REQ_SENT;
trace_icm_send_sidr_req(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
@@ -3768,17 +3776,17 @@ out:
static void cm_process_send_error(struct cm_id_private *cm_id_priv,
struct ib_mad_send_buf *msg,
- enum ib_cm_state state,
enum ib_wc_status wc_status)
{
+ enum ib_cm_state state = (unsigned long) msg->context[1];
struct ib_cm_event cm_event = {};
int ret;
- /* Discard old sends or ones without a response. */
+ /* Discard old sends. */
spin_lock_irq(&cm_id_priv->lock);
if (msg != cm_id_priv->msg) {
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_msg(msg);
+ cm_free_priv_msg(msg);
return;
}
cm_free_priv_msg(msg);
@@ -3826,9 +3834,7 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_send_wc)
{
struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
- struct cm_id_private *cm_id_priv = msg->context[0];
- enum ib_cm_state state =
- (enum ib_cm_state)(unsigned long)msg->context[1];
+ struct cm_id_private *cm_id_priv;
struct cm_port *port;
u16 attr_index;
@@ -3836,13 +3842,12 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
attr_index = be16_to_cpu(((struct ib_mad_hdr *)
msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
- /*
- * If the send was in response to a received message (context[0] is not
- * set to a cm_id), and is not a REJ, then it is a send that was
- * manually retried.
- */
- if (!cm_id_priv && (attr_index != CM_REJ_COUNTER))
+ if (msg->context[0] == CM_DIRECT_RETRY_CTX) {
msg->retries = 1;
+ cm_id_priv = NULL;
+ } else {
+ cm_id_priv = msg->context[0];
+ }
atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
if (msg->retries)
@@ -3850,10 +3855,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
&port->counters[CM_XMIT_RETRIES][attr_index]);
if (cm_id_priv)
- cm_process_send_error(cm_id_priv, msg, state,
- mad_send_wc->status);
+ cm_process_send_error(cm_id_priv, msg, mad_send_wc->status);
else
- cm_free_response_msg(msg);
+ cm_free_msg(msg);
}
static void cm_work_handler(struct work_struct *_work)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index e029401b5680..ca9b956c034d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -437,6 +437,7 @@ int ib_device_rename(struct ib_device *ibdev, const char *name)
client->rename(ibdev, client_data);
}
up_read(&ibdev->client_data_rwsem);
+ rdma_nl_notify_event(ibdev, 0, RDMA_RENAME_EVENT);
up_read(&devices_rwsem);
return 0;
}
@@ -2759,6 +2760,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, resize_cq);
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
+ SET_DEVICE_OP(dev_ops, ufile_hw_cleanup);
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_counters);
@@ -2852,6 +2854,40 @@ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
},
};
+static int ib_netdevice_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *ib_ndev;
+ struct ib_device *ibdev;
+ u32 port;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ rdma_for_each_port(ibdev, port) {
+ ib_ndev = ib_device_get_netdev(ibdev, port);
+ if (ndev == ib_ndev)
+ rdma_nl_notify_event(ibdev, port,
+ RDMA_NETDEV_RENAME_EVENT);
+ dev_put(ib_ndev);
+ }
+ ib_device_put(ibdev);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nb_netdevice = {
+ .notifier_call = ib_netdevice_event,
+};
+
static int __init ib_core_init(void)
{
int ret = -ENOMEM;
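The hunk above is the stock netdevice-notifier pattern: a notifier_block registered at init, with the callback filtering on NETDEV_CHANGENAME. A self-contained sketch of the same hookup in a hypothetical out-of-tree module (the rename_watch_* names are illustrative only):

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int rename_watch_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

	/* Only react to renames; pass everything else through. */
	if (event == NETDEV_CHANGENAME)
		pr_info("rename_watch: netdev renamed to %s\n", ndev->name);

	return NOTIFY_DONE;
}

static struct notifier_block rename_watch_nb = {
	.notifier_call = rename_watch_event,
};

static int __init rename_watch_init(void)
{
	return register_netdevice_notifier(&rename_watch_nb);
}

static void __exit rename_watch_exit(void)
{
	unregister_netdevice_notifier(&rename_watch_nb);
}

module_init(rename_watch_init);
module_exit(rename_watch_exit);
MODULE_LICENSE("GPL");

In ib_core the callback additionally resolves the net_device back to its ib_device via ib_device_get_by_netdev() before emitting RDMA_NETDEV_RENAME_EVENT, as the hunk shows.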
@@ -2923,6 +2959,8 @@ static int __init ib_core_init(void)
goto err_parent;
}
+ register_netdevice_notifier(&nb_netdevice);
+
return 0;
err_parent:
@@ -2952,6 +2990,7 @@ err:
static void __exit ib_core_cleanup(void)
{