Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c         |   4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h         |  19
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_av.c      |  43
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c    |  47
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_cq.c      |   8
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_hdr.h     |  48
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_icrc.c    |   4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h     |   9
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mmap.c    |   6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c      | 122
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mw.c      |  23
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c     |  42
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_opcode.c  |  35
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_opcode.h  |  17
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_param.h   |   7
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c      |  98
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c     |  50
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c    | 329
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_srq.c     |  20
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_task.c    |  52
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_task.h    |  19
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c   | 106
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h   |   7
-rw-r--r--  drivers/infiniband/sw/siw/siw_cq.c      |  24
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c   |  40

25 files changed, 818 insertions(+), 361 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 51daac5c4feb..136c2efe3466 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -187,14 +187,14 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
exists = rxe_get_dev_from_net(ndev);
if (exists) {
ib_device_put(&exists->ib_dev);
- pr_err("already configured on %s\n", ndev->name);
+ rxe_dbg(exists, "already configured on %s\n", ndev->name);
err = -EEXIST;
goto err;
}
err = rxe_net_add(ibdev_name, ndev);
if (err) {
- pr_err("failed to add %s\n", ndev->name);
+ rxe_dbg(exists, "failed to add %s\n", ndev->name);
goto err;
}
err:
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 30fbdf3bc76a..ab334900fcc3 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -38,6 +38,25 @@
#define RXE_ROCE_V2_SPORT (0xc000)
+#define rxe_dbg(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
+ "%s: " fmt, __func__, ##__VA_ARGS__)
+#define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device, \
+ "uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_pd(pd, fmt, ...) ibdev_dbg((pd)->ibpd.device, \
+ "pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_ah(ah, fmt, ...) ibdev_dbg((ah)->ibah.device, \
+ "ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_srq(srq, fmt, ...) ibdev_dbg((srq)->ibsrq.device, \
+ "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_qp(qp, fmt, ...) ibdev_dbg((qp)->ibqp.device, \
+ "qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_cq(cq, fmt, ...) ibdev_dbg((cq)->ibcq.device, \
+ "cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_mr(mr, fmt, ...) ibdev_dbg((mr)->ibmr.device, \
+ "mr#%d %s: " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
+#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
+ "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
+
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
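Note: the hunk above adds one object-scoped debug macro per rxe object type; the rest of the series converts pr_xxx() call sites over to them. A minimal usage sketch, assuming it sits in a driver source file that already includes rxe.h (the example_log() wrapper is hypothetical; the macros are exactly the ones added above):

#include "rxe.h"

/* hypothetical helper, for illustration only */
static void example_log(struct rxe_qp *qp, struct rxe_mr *mr)
{
	/* roughly expands to:
	 * ibdev_dbg(qp->ibqp.device, "qp#%d %s: retry count exceeded\n",
	 *	     qp->elem.index, __func__);
	 */
	rxe_dbg_qp(qp, "retry count exceeded\n");

	/* per-object variants exist for uc, pd, ah, srq, qp, cq, mr and mw */
	rxe_dbg_mr(mr, "lkey = 0x%x not free\n", mr->lkey);
}
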
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 3b05314ca739..889d7adbd455 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -14,26 +14,45 @@ void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av)
memcpy(av->dmac, attr->roce.dmac, ETH_ALEN);
}
-int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
+static int chk_attr(void *obj, struct rdma_ah_attr *attr, bool obj_is_ah)
{
const struct ib_global_route *grh = rdma_ah_read_grh(attr);
struct rxe_port *port;
+ struct rxe_dev *rxe;
+ struct rxe_qp *qp;
+ struct rxe_ah *ah;
int type;
+ if (obj_is_ah) {
+ ah = obj;
+ rxe = to_rdev(ah->ibah.device);
+ } else {
+ qp = obj;
+ rxe = to_rdev(qp->ibqp.device);
+ }
+
port = &rxe->port;
if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH) {
if (grh->sgid_index > port->attr.gid_tbl_len) {
- pr_warn("invalid sgid index = %d\n",
- grh->sgid_index);
+ if (obj_is_ah)
+ rxe_dbg_ah(ah, "invalid sgid index = %d\n",
+ grh->sgid_index);
+ else
+ rxe_dbg_qp(qp, "invalid sgid index = %d\n",
+ grh->sgid_index);
return -EINVAL;
}
type = rdma_gid_attr_network_type(grh->sgid_attr);
if (type < RDMA_NETWORK_IPV4 ||
type > RDMA_NETWORK_IPV6) {
- pr_warn("invalid network type for rdma_rxe = %d\n",
- type);
+ if (obj_is_ah)
+ rxe_dbg_ah(ah, "invalid network type for rdma_rxe = %d\n",
+ type);
+ else
+ rxe_dbg_qp(qp, "invalid network type for rdma_rxe = %d\n",
+ type);
return -EINVAL;
}
}
@@ -41,6 +60,16 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
return 0;
}
+int rxe_av_chk_attr(struct rxe_qp *qp, struct rdma_ah_attr *attr)
+{
+ return chk_attr(qp, attr, false);
+}
+
+int rxe_ah_chk_attr(struct rxe_ah *ah, struct rdma_ah_attr *attr)
+{
+ return chk_attr(ah, attr, true);
+}
+
void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
struct rdma_ah_attr *attr)
{
@@ -121,12 +150,12 @@ struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp)
/* only new user provider or kernel client */
ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num);
if (!ah) {
- pr_warn("Unable to find AH matching ah_num\n");
+ rxe_dbg_qp(pkt->qp, "Unable to find AH matching ah_num\n");
return NULL;
}
if (rxe_ah_pd(ah) != pkt->qp->pd) {
- pr_warn("PDs don't match for AH and QP\n");
+ rxe_dbg_qp(pkt->qp, "PDs don't match for AH and QP\n");
rxe_put(ah);
return NULL;
}
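Note: the rxe_av.c hunks fold the attribute check into a common chk_attr() and expose per-object wrappers, so the new debug output is tagged with the object that triggered it. A hedged sketch of how callers are expected to pick a wrapper (these callers are illustrative, not copied from rxe_qp.c or rxe_verbs.c):

#include "rxe.h"
#include "rxe_loc.h"

/* illustrative only: QP-side validation, e.g. from a modify-QP path */
static int example_chk_qp_av(struct rxe_qp *qp, struct rdma_ah_attr *attr)
{
	return rxe_av_chk_attr(qp, attr);	/* logs via rxe_dbg_qp() */
}

/* illustrative only: AH-side validation, e.g. from a create-AH path */
static int example_chk_ah(struct rxe_ah *ah, struct rdma_ah_attr *attr)
{
	return rxe_ah_chk_attr(ah, attr);	/* logs via rxe_dbg_ah() */
}
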
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index fb0c008af78c..20737fec392b 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -104,6 +104,8 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
case IB_WR_LOCAL_INV: return IB_WC_LOCAL_INV;
case IB_WR_REG_MR: return IB_WC_REG_MR;
case IB_WR_BIND_MW: return IB_WC_BIND_MW;
+ case IB_WR_ATOMIC_WRITE: return IB_WC_ATOMIC_WRITE;
+ case IB_WR_FLUSH: return IB_WC_FLUSH;
default:
return 0xff;
@@ -114,11 +116,11 @@ void retransmit_timer(struct timer_list *t)
{
struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
- pr_debug("%s: fired for qp#%d\n", __func__, qp->elem.index);
+ rxe_dbg_qp(qp, "retransmit timer fired\n");
if (qp->valid) {
qp->comp.timeout = 1;
- rxe_run_task(&qp->comp.task, 1);
+ rxe_sched_task(&qp->comp.task);
}
}
@@ -132,7 +134,10 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
if (must_sched != 0)
rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
- rxe_run_task(&qp->comp.task, must_sched);
+ if (must_sched)
+ rxe_sched_task(&qp->comp.task);
+ else
+ rxe_run_task(&qp->comp.task);
}
static inline enum comp_state get_wqe(struct rxe_qp *qp,
@@ -200,6 +205,10 @@ static inline enum comp_state check_psn(struct rxe_qp *qp,
*/
if (pkt->psn == wqe->last_psn)
return COMPST_COMP_ACK;
+ else if (pkt->opcode == IB_OPCODE_RC_ACKNOWLEDGE &&
+ (qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST ||
+ qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE))
+ return COMPST_CHECK_ACK;
else
return COMPST_DONE;
} else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
@@ -228,6 +237,10 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
+ /* Check NAK code to handle a remote error */
+ if (pkt->opcode == IB_OPCODE_RC_ACKNOWLEDGE)
+ break;
+
if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
/* read retries of partial data may restart from
@@ -258,12 +271,16 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
if ((syn & AETH_TYPE_MASK) != AETH_ACK)
return COMPST_ERROR;
+ if (wqe->wr.opcode == IB_WR_ATOMIC_WRITE)
+ return COMPST_WRITE_SEND;
+
fallthrough;
/* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH)
*/
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
if (wqe->wr.opcode != IB_WR_RDMA_READ &&
- wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
+ wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV &&
+ wqe->wr.opcode != IB_WR_FLUSH) {
wqe->status = IB_WC_FATAL_ERR;
return COMPST_ERROR;
}
@@ -305,7 +322,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_run_task(&qp->req.task, 0);
+ rxe_run_task(&qp->req.task);
}
}
return COMPST_ERROR_RETRY;
@@ -323,7 +340,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
return COMPST_ERROR;
default:
- pr_warn("unexpected nak %x\n", syn);
+ rxe_dbg_qp(qp, "unexpected nak %x\n", syn);
wqe->status = IB_WC_REM_OP_ERR;
return COMPST_ERROR;
}
@@ -334,7 +351,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
break;
default:
- pr_warn("unexpected opcode\n");
+ rxe_dbg_qp(qp, "unexpected opcode\n");
}
return COMPST_ERROR;
@@ -452,7 +469,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/
if (qp->req.wait_fence) {
qp->req.wait_fence = 0;
- rxe_run_task(&qp->req.task, 0);
+ rxe_run_task(&qp->req.task);
}
}
@@ -466,7 +483,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (qp->req.need_rd_atomic) {
qp->comp.timeout_retry = 0;
qp->req.need_rd_atomic = 0;
- rxe_run_task(&qp->req.task, 0);
+ rxe_run_task(&qp->req.task);
}
}
@@ -512,7 +529,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_sched_task(&qp->req.task);
}
}
@@ -587,8 +604,7 @@ int rxe_completer(void *arg)
state = COMPST_GET_ACK;
while (1) {
- pr_debug("qp#%d state = %s\n", qp_num(qp),
- comp_state_name[state]);
+ rxe_dbg_qp(qp, "state = %s\n", comp_state_name[state]);
switch (state) {
case COMPST_GET_ACK:
skb = skb_dequeue(&qp->resp_pkts);
@@ -646,7 +662,7 @@ int rxe_completer(void *arg)
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_sched_task(&qp->req.task);
}
state = COMPST_DONE;
@@ -714,7 +730,7 @@ int rxe_completer(void *arg)
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
qp->comp.started_retry = 1;
- rxe_run_task(&qp->req.task, 0);
+ rxe_run_task(&qp->req.task);
}
goto done;
@@ -735,8 +751,7 @@ int rxe_completer(void *arg)
* rnr timer has fired
*/
qp->req.wait_for_rnr_timer = 1;
- pr_debug("qp#%d set rnr nak timer\n",
- qp_num(qp));
+ rxe_dbg_qp(qp, "set rnr nak timer\n");
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
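Note: the completer hunks also drop the boolean argument from rxe_run_task() in favour of two explicit entry points: run the work inline, or hand it to the tasklet. A simplified stand-alone sketch of that split, assuming the task is still a tasklet wrapper as before this series (the example_* names are illustrative; the real helpers live in rxe_task.[ch], which this diffstat also touches):

#include <linux/interrupt.h>

struct example_task {
	struct tasklet_struct tasklet;
	int (*func)(void *arg);
	void *arg;
};

/* inline variant: do the work now, in the caller's context */
static void example_run_task(struct example_task *task)
{
	task->func(task->arg);
}

/* deferred variant: let the tasklet pick the work up in softirq context */
static void example_sched_task(struct example_task *task)
{
	tasklet_schedule(&task->tasklet);
}
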
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index b1a0ab3cd4bd..1df186534639 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -14,12 +14,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
int count;
if (cqe <= 0) {
- pr_warn("cqe(%d) <= 0\n", cqe);
+ rxe_dbg(rxe, "cqe(%d) <= 0\n", cqe);
goto err1;
}
if (cqe > rxe->attr.max_cqe) {
- pr_debug("cqe(%d) > max_cqe(%d)\n",
+ rxe_dbg(rxe, "cqe(%d) > max_cqe(%d)\n",
cqe, rxe->attr.max_cqe);
goto err1;
}
@@ -27,7 +27,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
if (cq) {
count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
- pr_debug("cqe(%d) < current # elements in queue (%d)",
+ rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)",
cqe, count);
goto err1;
}
@@ -65,7 +65,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
cq->queue = rxe_queue_init(rxe, &cqe,
sizeof(struct rxe_cqe), type);
if (!cq->queue) {
- pr_warn("unable to create cq\n");
+ rxe_dbg(rxe, "unable to create cq\n");
return -ENOMEM;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index e432f9e37795..46f82b27fcd2 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -608,6 +608,52 @@ static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
}
/******************************************************************************
+ * FLUSH Extended Transport Header
+ ******************************************************************************/
+
+struct rxe_feth {
+ __be32 bits;
+};
+
+#define FETH_PLT_MASK (0x0000000f) /* bits 3-0 */
+#define FETH_SEL_MASK (0x00000030) /* bits 5-4 */
+#define FETH_SEL_SHIFT (4U)
+
+static inline u32 __feth_plt(void *arg)
+{
+ struct rxe_feth *feth = arg;
+
+ return be32_to_cpu(feth->bits) & FETH_PLT_MASK;
+}
+
+static inline u32 __feth_sel(void *arg)
+{
+ struct rxe_feth *feth = arg;
+
+ return (be32_to_cpu(feth->bits) & FETH_SEL_MASK) >> FETH_SEL_SHIFT;
+}
+
+static inline u32 feth_plt(struct rxe_pkt_info *pkt)
+{
+ return __feth_plt(pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]);
+}
+
+static inline u32 feth_sel(struct rxe_pkt_info *pkt)
+{
+ return __feth_sel(pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]);
+}
+
+static inline void feth_init(struct rxe_pkt_info *pkt, u8 type, u8 level)
+{
+ struct rxe_feth *feth = (struct rxe_feth *)
+ (pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_FETH]);
+ u32 bits = ((level << FETH_SEL_SHIFT) & FETH_SEL_MASK) |
+ (type & FETH_PLT_MASK);
+
+ feth->bits = cpu_to_be32(bits);
+}
+
+/******************************************************************************
* Atomic Extended Transport Header
******************************************************************************/
struct rxe_atmeth {
@@ -742,7 +788,6 @@ enum aeth_syndrome {
AETH_NAK_INVALID_REQ = 0x61,
AETH_NAK_REM_ACC_ERR = 0x62,
AETH_NAK_REM_OP_ERR = 0x63,
- AETH_NAK_INV_RD_REQ = 0x64,
};
static inline u8 __aeth_syn(void *arg)
@@ -910,6 +955,7 @@ enum rxe_hdr_length {
RXE_ATMETH_BYTES = sizeof(struct rxe_atmeth),
RXE_IETH_BYTES = sizeof(struct rxe_ieth),
RXE_RDETH_BYTES = sizeof(struct rxe_rdeth),
+ RXE_FETH_BYTES = sizeof(struct rxe_feth),
};
static inline size_t header_size(struct rxe_pkt_info *pkt)
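Note: the new FLUSH extended transport header carries the placement type in bits 3-0 and the selectivity level in bits 5-4 of a single big-endian word. A stand-alone user-space illustration of the same mask/shift round trip (the numeric type/level values are examples only; htonl/ntohl stand in for cpu_to_be32/be32_to_cpu):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define FETH_PLT_MASK	0x0000000fu	/* placement type, bits 3-0 */
#define FETH_SEL_MASK	0x00000030u	/* selectivity level, bits 5-4 */
#define FETH_SEL_SHIFT	4u

int main(void)
{
	uint8_t type = 0x2, level = 0x1;	/* example values only */
	uint32_t bits = ((level << FETH_SEL_SHIFT) & FETH_SEL_MASK) |
			(type & FETH_PLT_MASK);
	uint32_t feth = htonl(bits);		/* carried big-endian on the wire */

	printf("plt=%u sel=%u\n",
	       ntohl(feth) & FETH_PLT_MASK,
	       (ntohl(feth) & FETH_SEL_MASK) >> FETH_SEL_SHIFT);
	return 0;
}
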
diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
index 46bb07c5c4df..71bc2c189588 100644
--- a/drivers/infiniband/sw/rxe/rxe_icrc.c
+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
@@ -21,7 +21,7 @@ int rxe_icrc_init(struct rxe_dev *rxe)
tfm = crypto_alloc_shash("crc32", 0, 0);
if (IS_ERR(tfm)) {
- pr_warn("failed to init crc32 algorithm err:%ld\n",
+ rxe_dbg(rxe, "failed to init crc32 algorithm err: %ld\n",
PTR_ERR(tfm));
return PTR_ERR(tfm);
}
@@ -51,7 +51,7 @@ static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len)
*(__be32 *)shash_desc_ctx(shash) = crc;
err = crypto_shash_update(shash, next, len);
if (unlikely(err)) {
- pr_warn_ratelimited("failed crc calculation, err: %d\n", err);
+ rxe_dbg(rxe, "failed crc calculation, err: %d\n", err);
return (__force __be32)crc32_le((__force u32)crc, next, len);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index c2a5c8814a48..948ce4902b10 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -9,16 +9,12 @@
/* rxe_av.c */
void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av);
-
-int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);
-
+int rxe_av_chk_attr(struct rxe_qp *qp, struct rdma_ah_attr *attr);
+int rxe_ah_chk_attr(struct rxe_ah *ah, struct rdma_ah_attr *attr);
void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
struct rdma_ah_attr *attr);
-
void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);
-
void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);
-
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp);
/* rxe_cq.c */
@@ -68,6 +64,7 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr);
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr);
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 9149b6095429..a47d72dbc537 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -79,7 +79,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
/* Don't allow a mmap larger than the object. */
if (size > ip->info.size) {
- pr_err("mmap region is larger than the object!\n");
+ rxe_dbg(rxe, "mmap region is larger than the object!\n");
spin_unlock_bh(&rxe->pending_lock);
ret = -EINVAL;
goto done;
@@ -87,7 +87,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
goto found_it;
}
- pr_warn("unable to find pending mmap info\n");
+ rxe_dbg(rxe, "unable to find pending mmap info\n");
spin_unlock_bh(&rxe->pending_lock);
ret = -EINVAL;
goto done;
@@ -98,7 +98,7 @@ found_it:
ret = remap_vmalloc_range(vma, ip->obj, 0);
if (ret) {
- pr_err("err %d from remap_vmalloc_range\n", ret);
+ rxe_dbg(rxe, "err %d from remap_vmalloc_range\n", ret);
goto done;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 502e9ada99b3..072eac4b65d2 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -4,6 +4,8 @@
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*/
+#include <linux/libnvdimm.h>
+
#include "rxe.h"
#include "rxe_loc.h"
@@ -26,7 +28,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
- switch (mr->type) {
+ switch (mr->ibmr.type) {
case IB_MR_TYPE_DMA:
return 0;
@@ -38,8 +40,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
return 0;
default:
- pr_warn("%s: mr type (%d) not supported\n",
- __func__, mr->type);
+ rxe_dbg_mr(mr, "type (%d) not supported\n", mr->ibmr.type);
return -EFAULT;
}
}
@@ -62,7 +63,6 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
mr->rkey = mr->ibmr.rkey = rkey;
mr->state = RXE_MR_STATE_INVALID;
- mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}
static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
@@ -99,6 +99,7 @@ err2:
kfree(mr->map[i]);
kfree(mr->map);
+ mr->map = NULL;
err1:
return -ENOMEM;
}
@@ -109,7 +110,16 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr)
mr->access = access;
mr->state = RXE_MR_STATE_VALID;
- mr->type = IB_MR_TYPE_DMA;
+ mr->ibmr.type = IB_MR_TYPE_DMA;
+}
+
+static bool is_pmem_page(struct page *pg)
+{
+ unsigned long paddr = page_to_phys(pg);
+
+ return REGION_INTERSECTS ==
+ region_intersects(paddr, PAGE_SIZE, IORESOURCE_MEM,
+ IORES_DESC_PERSISTENT_MEMORY);
}
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -122,12 +132,11 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int num_buf;
void *vaddr;
int err;
- int i;
umem = ib_umem_get(&rxe->ib_dev, start, length, access);
if (IS_ERR(umem)) {
- pr_warn("%s: Unable to pin memory region err = %d\n",
- __func__, (int)PTR_ERR(umem));
+ rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
+ (int)PTR_ERR(umem));
err = PTR_ERR(umem);
goto err_out;
}
@@ -138,8 +147,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
err = rxe_mr_alloc(mr, num_buf);
if (err) {
- pr_warn("%s: Unable to allocate memory for map\n",
- __func__);
+ rxe_dbg_mr(mr, "Unable to allocate memory for map\n");
goto err_release_umem;
}
@@ -149,23 +157,30 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
num_buf = 0;
map = mr->map;
if (length > 0) {
- buf = map[0]->buf;
+ bool persistent_access = access & IB_ACCESS_FLUSH_PERSISTENT;
+ buf = map[0]->buf;
for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
+ struct page *pg = sg_page_iter_page(&sg_iter);
+
+ if (persistent_access && !is_pmem_page(pg)) {
+ rxe_dbg_mr(mr, "Unable to register persistent access to non-pmem device\n");
+ err = -EINVAL;
+ goto err_release_umem;
+ }
+
if (num_buf >= RXE_BUF_PER_MAP) {
map++;
buf = map[0]->buf;
num_buf = 0;
}
- vaddr = page_address(sg_page_iter_page(&sg_iter));
+ vaddr = page_address(pg);
if (!vaddr) {
- pr_warn("%s: Unable to get virtual address\n",
- __func__);
+ rxe_dbg_mr(mr, "Unable to get virtual address\n");
err = -ENOMEM;
- goto err_cleanup_map;
+ goto err_release_umem;
}
-
buf->addr = (uintptr_t)vaddr;
buf->size = PAGE_SIZE;
num_buf++;
@@ -178,14 +193,11 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
mr->access = access;
mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID;
- mr->type = IB_MR_TYPE_USER;
+ mr->ibmr.type = IB_MR_TYPE_USER;
+ mr->ibmr.page_size = PAGE_SIZE;
return 0;
-err_cleanup_map:
- for (i = 0; i < mr->num_map; i++)
- kfree(mr->map[i]);
- kfree(mr->map);
err_release_umem:
ib_umem_release(umem);
err_out:
@@ -205,7 +217,7 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
mr->max_buf = max_pages;
mr->state = RXE_MR_STATE_FREE;
- mr->type = IB_MR_TYPE_MEM_REG;
+ mr->ibmr.type = IB_MR_TYPE_MEM_REG;
return 0;
@@ -256,7 +268,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
void *addr;
if (mr->state != RXE_MR_STATE_VALID) {
- pr_warn("mr not in valid state\n");
+ rxe_dbg_mr(mr, "Not in valid state\n");
addr = NULL;
goto out;
}
@@ -267,7 +279,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
}
if (mr_check_range(mr, iova, length)) {
- pr_warn("range violation\n");
+ rxe_dbg_mr(mr, "Range violation\n");
addr = NULL;
goto out;
}
@@ -275,7 +287,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
lookup_iova(mr, iova, &m, &n, &offset);
if (offset + length > mr->map[m]->buf[n].size) {
- pr_warn("crosses page boundary\n");
+ rxe_dbg_mr(mr, "Crosses page boundary\n");
addr = NULL;
goto out;
}
@@ -286,6 +298,39 @@ out:
return addr;
}
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length)
+{
+ size_t offset;
+
+ if (length == 0)
+ return 0;
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA)
+ return -EFAULT;
+
+ offset = (iova - mr->ibmr.iova + mr->offset) & mr->page_mask;
+ while (length > 0) {
+ u8 *va;
+ int bytes;
+
+ bytes = mr->ibmr.page_size - offset;
+ if (bytes > length)
+ bytes = length;
+
+ va = iova_to_vaddr(mr, iova, length);
+ if (!va)
+ return -EFAULT;
+
+ arch_wb_cache_pmem(va, bytes);
+
+ length -= bytes;
+ iova += bytes;
+ offset = 0;
+ }
+
+ return 0;
+}
+
/* copy data from a range (vaddr, vaddr+length-1) to or from
* a mr object starting at iova.
*/
@@ -304,7 +349,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
if (length == 0)
return 0;
- if (mr->type == IB_MR_TYPE_DMA) {
+ if (mr->ibmr.type == IB_MR_TYPE_DMA) {
u8 *src, *dest;
src = (dir == RXE_TO_MR_OBJ) ? addr : ((void *)(uintptr_t)iova);
@@ -511,7 +556,7 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) ||
(type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||
- mr_pd(mr) != pd || (access && !(access & mr->access)) ||
+ mr_pd(mr) != pd || ((access & mr->access) != access) ||
mr->state != RXE_MR_STATE_VALID)) {
rxe_put(mr);
mr = NULL;
@@ -528,27 +573,26 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
if (!mr) {
- pr_err("%s: No MR for key %#x\n", __func__, key);
+ rxe_dbg_qp(qp, "No MR for key %#x\n", key);
ret = -EINVAL;
goto err;
}
if (mr->rkey ? (key != mr->rkey) : (key != mr->lkey)) {
- pr_err("%s: wr key (%#x) doesn't match mr key (%#x)\n",
- __func__, key, (mr->rkey ? mr->rkey : mr->lkey));
+ rxe_dbg_mr(mr, "wr key (%#x) doesn't match mr key (%#x)\n",
+ key, (mr->rkey ? mr->rkey : mr->lkey));
ret = -EINVAL;
goto err_drop_ref;
}
if (atomic_read(&mr->num_mw) > 0) {
- pr_warn("%s: Attempt to invalidate an MR while bound to MWs\n",
- __func__);
+ rxe_dbg_mr(mr, "Attempt to invalidate an MR while bound to MWs\n");
ret = -EINVAL;
goto err_drop_ref;
}
- if (unlikely(mr->type != IB_MR_TYPE_MEM_REG)) {
- pr_warn("%s: mr->type (%d) is wrong type\n", __func__, mr->type);
+ if (unlikely(mr->ibmr.type != IB_MR_TYPE_MEM_REG)) {
+ rxe_dbg_mr(mr, "Type (%d) is wrong\n", mr->ibmr.type);
ret = -EINVAL;
goto err_drop_ref;
}
@@ -577,22 +621,20 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
/* user can only register MR in free state */
if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
- pr_warn("%s: mr->lkey = 0x%x not free\n",
- __func__, mr->lkey);
+ rxe_dbg_mr(mr, "mr->lkey = 0x%x not free\n", mr->lkey);
return -EINVAL;
}
/* user can only register mr with qp in same protection domain */
if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
- pr_warn("%s: qp->pd and mr->pd don't match\n",
- __func__);
+ rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n");
return -EINVAL;
}
/* user is only allowed to change key portion of l/rkey */
if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
- pr_warn("%s: key = 0x%x has wrong index mr->lkey = 0x%x\n",
- __func__, key, mr->lkey);
+ rxe_dbg_mr(mr, "key = 0x%x has wrong index mr->lkey = 0x%x\n",
+ key, mr->lkey);
return -EINVAL;
}
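Note: among the rxe_mr.c changes, lookup_mr() now requires every requested access bit to be granted by the MR, where the old test only rejected a request when none of the bits matched. A small stand-alone comparison of the two predicates (the flag values mirror the ib_verbs.h access flags, but are copied here for illustration only):

#include <stdio.h>

#define IB_ACCESS_REMOTE_READ	(1 << 2)	/* illustrative copies of the flag values */
#define IB_ACCESS_REMOTE_ATOMIC	(1 << 3)

int main(void)
{
	int mr_access = IB_ACCESS_REMOTE_READ;	/* MR registered for remote read only */
	int wanted = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_ATOMIC;

	/* old predicate: fail only if access was requested and no bit matched */
	int old_ok = !(wanted && !(wanted & mr_access));
	/* new predicate: every requested bit must be present in mr->access */
	int new_ok = ((wanted & mr_access) == wanted);

	printf("old check allows: %d, new check allows: %d\n", old_ok, new_ok);
	return 0;
}
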
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 902b7df7aaed..afa5ce1a7116 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -52,14 +52,14 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
{
if (mw->ibmw.type == IB_MW_TYPE_1) {
if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
- pr_err_once(
+ rxe_dbg_mw(mw,
"attempt to bind a type 1 MW not in the valid state\n");
return -EINVAL;
}
/* o10-36.2.2 */
if (unlikely((mw->access & IB_ZERO_BASED))) {
- pr_err_once("attempt to bind a zero based type 1 MW\n");
+ rxe_dbg_mw(mw, "attempt to bind a zero based type 1 MW\n");
return -EINVAL;
}
}
@@ -67,21 +67,21 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
if (mw->ibmw.type == IB_MW_TYPE_2) {
/* o10-37.2.30 */
if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
- pr_err_once(
+ rxe_dbg_mw(mw,
"attempt to bind a type 2 MW not in the free state\n");
return -EINVAL;
}
/* C10-72 */
if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
- pr_err_once(
+ rxe_dbg_mw(mw,
"attempt to bind type 2 MW with qp with different PD\n");
return -EINVAL;
}
/* o10-37.2.40 */
if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
- pr_err_once(
+ rxe_dbg_mw(mw,
"attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
return -EINVAL;
}
@@ -92,13 +92,13 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
return 0;
if (unlikely(mr->access & IB_ZERO_BASED)) {
- pr_err_once("attempt to bind MW to zero based MR\n");
+ rxe_dbg_mw(mw, "attempt to bind MW to zero based MR\n");
return -EINVAL;