| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-22 12:41:14 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-22 12:41:14 -0700 |
| commit | 5266e5b12c8b73587130325f7074d2f49ef9e427 (patch) | |
| tree | 79abfbf1b047de05860fa832f66fce5078cc6434 | |
| parent | fc739eba99dc8655369a07ead098de9960e48fff (diff) | |
| parent | 5e47f1985d7107331c3f64fb3ec83d66fd73577e (diff) | |
| download | linux-5266e5b12c8b73587130325f7074d2f49ef9e427.tar.gz, linux-5266e5b12c8b73587130325f7074d2f49ef9e427.tar.bz2, linux-5266e5b12c8b73587130325f7074d2f49ef9e427.zip | |
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
"The highlights this round include:
- Add target_alloc_session() w/ callback helper for doing se_session
allocation + tag + se_node_acl lookup. (HCH + nab)
- Tree-wide fabric driver conversion to use target_alloc_session()
- Convert sbp-target to use percpu_ida tag pre-allocation, and
TARGET_SCF_ACK_KREF I/O krefs (Chris Boot + nab)
- Convert usb-gadget to use percpu_ida tag pre-allocation, and
TARGET_SCF_ACK_KREF I/O krefs (Andrzej Pietrasiewicz + nab)
- Convert xen-scsiback to use percpu_ida tag pre-allocation, and
TARGET_SCF_ACK_KREF I/O krefs (Juergen Gross + nab)
- Convert tcm_fc to use TARGET_SCF_ACK_KREF I/O + TMR krefs
- Convert ib_srpt to use percpu_ida tag pre-allocation
- Add DebugFS node for qla2xxx target sess list (Quinn)
- Rework iser-target connection termination (Jenny + Sagi)
- Convert iser-target to new CQ API (HCH)
- Add pass-through WRITE_SAME support for IBLOCK (Mike Christie)
- Introduce data_bitmap for asynchronous access of data area (Sheng
Yang + Andy)
- Fix target_release_cmd_kref shutdown comp leak (Himanshu Madhani)
Also, there is a separate PULL request coming for the cxgb4 NIC driver
prerequisites for supporting hw iSCSI segmentation offload (ISO), which
will be the base for a number of v4.7 developments involving
iscsi-target hw offloads"
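The first two highlights describe a pattern that each fabric conversion in this pull shares: allocate the session and its pre-sized command map in one call, then draw per-I/O tags from the session's percpu_ida pool. Below is a minimal sketch of that pattern; all `example_*` names are hypothetical, the tag count of 128 is arbitrary, and individual drivers differ in what their callback does — this is an illustration in the style of the conversions, not any one driver's code.

```c
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/percpu_ida.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical fabric-private command and connection structures. */
struct example_fabric_cmd {
	struct se_cmd se_cmd;
	/* ... fabric-private per-I/O state ... */
};

struct example_fabric_conn {
	struct se_session *se_sess;
};

/* Invoked by target_alloc_session() once the se_node_acl lookup has
 * succeeded, before the session is returned to the caller. */
static int example_alloc_sess_cb(struct se_portal_group *se_tpg,
				 struct se_session *se_sess, void *private)
{
	struct example_fabric_conn *conn = private;

	conn->se_sess = se_sess;
	return 0;
}

static int example_make_session(struct se_portal_group *se_tpg,
				struct example_fabric_conn *conn,
				const char *initiatorname)
{
	struct se_session *se_sess;

	/* One call performs se_session allocation, sess_cmd_map and
	 * sess_tag_pool setup for 128 tags, and the se_node_acl lookup. */
	se_sess = target_alloc_session(se_tpg, 128,
				       sizeof(struct example_fabric_cmd),
				       TARGET_PROT_NORMAL, initiatorname,
				       conn, example_alloc_sess_cb);
	return PTR_ERR_OR_ZERO(se_sess);
}

/* Per-I/O path: pre-allocate a tag and take the matching slot of the
 * session-wide command map, as the sbp/usb-gadget/xen conversions do. */
static struct example_fabric_cmd *
example_get_cmd(struct se_session *se_sess)
{
	struct example_fabric_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return NULL;

	cmd = &((struct example_fabric_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(*cmd));
	cmd->se_cmd.map_tag = tag;	/* returned via percpu_ida_free() */
	return cmd;
}
```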
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (36 commits)
target: Fix target_release_cmd_kref shutdown comp leak
target: Avoid DataIN transfers for non-GOOD SAM status
target/user: Report capability of handling out-of-order completions to userspace
target/user: Fix size_t format-spec build warning
target/user: Don't free expired command when time out
target/user: Introduce data_bitmap, replace data_length/data_head/data_tail
target/user: Free data ring in unified function
target/user: Use iovec[] to describe continuous area
target: Remove enum transport_lunflags_table
target/iblock: pass WRITE_SAME to device if possible
iser-target: Kill the ->isert_cmd back pointer in struct iser_tx_desc
iser-target: Kill struct isert_rdma_wr
iser-target: Convert to new CQ API
iser-target: Split and properly type the login buffer
iser-target: Remove ISER_RECV_DATA_SEG_LEN
iser-target: Remove impossible condition from isert_wait_conn
iser-target: Remove redundant wait in release_conn
iser-target: Rework connection termination
iser-target: Separate flows for np listeners and connections cma events
iser-target: Add new state ISER_CONN_BOUND to isert_conn
...
29 files changed, 1240 insertions, 1195 deletions
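The first hunk of the diff below extends the TCMU design doc with TCMU_MAILBOX_FLAG_CAP_OOOC. As a companion to it, here is a hedged userspace sketch of the "entry stealing" the doc describes. The structs are simplified mirrors of the documented layout — real code should use the uapi header include/uapi/linux/target_core_user.h, which adds padding and cacheline alignment — and `entry_len` stands in for the entry.hdr.length arithmetic; all `example_*` names are placeholders.

```c
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0)	/* value from the uapi header */

/* Simplified mirror of the mailbox fields named in the doc below. */
struct example_tcmu_mailbox {
	uint16_t version;
	uint16_t flags;
	uint32_t cmdr_off;	/* command ring offset from region start */
	uint32_t cmdr_size;
	uint32_t cmd_head;	/* kernel-owned */
	uint32_t cmd_tail;	/* userspace-owned */
};

/* Simplified mirror of a ring entry; only the fields touched here. */
struct example_tcmu_cmd_entry {
	uint32_t len_op;
	uint16_t cmd_id;
	uint8_t  kflags;
	uint8_t  uflags;
	struct {
		uint8_t scsi_status;
		/* ... sense_buffer etc. ... */
	} rsp;
};

/* Check once at startup whether out-of-order completion is allowed. */
static bool example_oooc_supported(const struct example_tcmu_mailbox *mb)
{
	return mb->flags & TCMU_MAILBOX_FLAG_CAP_OOOC;
}

/*
 * Complete command `done_id` out of order. The kernel still consumes ring
 * entries in posting order, so userspace "steals" the entry at cmd_tail by
 * rewriting its cmd_id, fills in the response, advances cmd_tail by the
 * entry length (mod cmdr_size), and signals the kernel with a 4-byte
 * write to the UIO file descriptor, as the doc specifies.
 */
static void example_complete_oooc(struct example_tcmu_mailbox *mb,
				  void *region_base, uint32_t entry_len,
				  uint16_t done_id, uint8_t scsi_status,
				  int uio_fd)
{
	struct example_tcmu_cmd_entry *entry = (void *)
		((char *)region_base + mb->cmdr_off + mb->cmd_tail);
	uint32_t kick = 1;

	entry->cmd_id = done_id;		/* steal the tail entry */
	entry->rsp.scsi_status = scsi_status;

	mb->cmd_tail = (mb->cmd_tail + entry_len) % mb->cmdr_size;
	(void)write(uio_fd, &kick, sizeof(kick));
}
```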
diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt
index bef81e42788f..4cebc1ebf99a 100644
--- a/Documentation/target/tcmu-design.txt
+++ b/Documentation/target/tcmu-design.txt
@@ -117,7 +117,9 @@ userspace (respectively) to put commands on the ring, and indicate
 when the commands are completed.
 
 version - 1 (userspace should abort if otherwise)
-flags - none yet defined.
+flags:
+- TCMU_MAILBOX_FLAG_CAP_OOOC: indicates out-of-order completion is
+  supported. See "The Command Ring" for details.
 cmdr_off - The offset of the start of the command ring from the start
 of the memory region, to account for the mailbox size.
 cmdr_size - The size of the command ring. This does *not* need to be a
@@ -162,6 +164,13 @@ rsp.sense_buffer if necessary. Userspace then increments
 mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the
 kernel via the UIO method, a 4-byte write to the file descriptor.
 
+If TCMU_MAILBOX_FLAG_CAP_OOOC is set for mailbox->flags, kernel is
+capable of handling out-of-order completions. In this case, userspace can
+handle command in different order other than original. Since kernel would
+still process the commands in the same order it appeared in the command
+ring, userspace need to update the cmd->id when completing the
+command(a.k.a steal the original command's entry).
+
 When the opcode is PAD, userspace only updates cmd_tail as above --
 it's a no-op. (The kernel inserts PAD entries to ensure each CMD entry
 is contiguous within the command ring.)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index f121e6129339..60b30d338a81 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -49,22 +49,25 @@ static struct workqueue_struct *isert_release_wq;
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-	       struct isert_rdma_wr *wr);
+isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
 static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-	       struct isert_rdma_wr *wr);
+isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
 static int
 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
 static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn);
+isert_login_post_recv(struct isert_conn *isert_conn);
 static int
 isert_rdma_accept(struct isert_conn *isert_conn);
 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
 
 static void isert_release_work(struct work_struct *work);
+static void isert_wait4flush(struct isert_conn *isert_conn);
+static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
 
 static inline bool
 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
@@ -177,12 +180,6 @@ err:
 	return ret;
 }
 
-static void
-isert_cq_event_callback(struct ib_event *e, void *context)
-{
-	isert_dbg("event: %d\n", e->event);
-}
-
 static int
 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 {
@@ -212,6 +209,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 		rx_sg->addr = rx_desc->dma_addr;
 		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
 		rx_sg->lkey = device->pd->local_dma_lkey;
+		rx_desc->rx_cqe.done = isert_recv_done;
 	}
 
 	return 0;
@@ -250,9 +248,6 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->rx_descs = NULL;
 }
 
-static void isert_cq_work(struct work_struct *);
-static void isert_cq_callback(struct ib_cq *, void *);
-
 static void
 isert_free_comps(struct isert_device *device)
 {
@@ -261,10 +256,8 @@ isert_free_comps(struct isert_device *device)
 	for (i = 0; i < device->comps_used; i++) {
 		struct isert_comp *comp = &device->comps[i];
 
-		if (comp->cq) {
-			cancel_work_sync(&comp->work);
-			ib_destroy_cq(comp->cq);
-		}
+		if (comp->cq)
+			ib_free_cq(comp->cq);
 	}
 	kfree(device->comps);
 }
@@ -293,28 +286,17 @@ isert_alloc_comps(struct isert_device *device)
 	max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
 
 	for (i = 0; i < device->comps_used; i++) {
-		struct ib_cq_init_attr cq_attr = {};
 		struct isert_comp *comp = &device->comps[i];
 
 		comp->device = device;
-		INIT_WORK(&comp->work, isert_cq_work);
-		cq_attr.cqe = max_cqe;
-		cq_attr.comp_vector = i;
-		comp->cq = ib_create_cq(device->ib_device,
-					isert_cq_callback,
-					isert_cq_event_callback,
-					(void *)comp,
-					&cq_attr);
+		comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
+				IB_POLL_WORKQUEUE);
 		if (IS_ERR(comp->cq)) {
 			isert_err("Unable to allocate cq\n");
 			ret = PTR_ERR(comp->cq);
 			comp->cq = NULL;
 			goto out_cq;
 		}
-
-		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
-		if (ret)
-			goto out_cq;
 	}
 
 	return 0;
@@ -582,7 +564,6 @@ isert_init_conn(struct isert_conn *isert_conn)
 	INIT_LIST_HEAD(&isert_conn->node);
 	init_completion(&isert_conn->login_comp);
 	init_completion(&isert_conn->login_req_comp);
-	init_completion(&isert_conn->wait);
 	kref_init(&isert_conn->kref);
 	mutex_init(&isert_conn->mutex);
 	spin_lock_init(&isert_conn->pool_lock);
@@ -596,11 +577,13 @@ isert_free_login_buf(struct isert_conn *isert_conn)
 	struct ib_device *ib_dev = isert_conn->device->ib_device;
 
 	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
-			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+			    ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
+	kfree(isert_conn->login_rsp_buf);
+
 	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
-			    ISCSI_DEF_MAX_RECV_SEG_LEN,
+			    ISER_RX_PAYLOAD_SIZE,
 			    DMA_FROM_DEVICE);
-	kfree(isert_conn->login_buf);
+	kfree(isert_conn->login_req_buf);
 }
 
 static int
@@ -609,50 +592,48 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
 {
 	int ret;
 
-	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
-					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
-	if (!isert_conn->login_buf) {
+	isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
+			GFP_KERNEL);
+	if (!isert_conn->login_req_buf) {
 		isert_err("Unable to allocate isert_conn->login_buf\n");
 		return -ENOMEM;
 	}
 
-	isert_conn->login_req_buf = isert_conn->login_buf;
-	isert_conn->login_rsp_buf = isert_conn->login_buf +
-				    ISCSI_DEF_MAX_RECV_SEG_LEN;
-
-	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
-		  isert_conn->login_buf, isert_conn->login_req_buf,
-		  isert_conn->login_rsp_buf);
-
 	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
-				(void *)isert_conn->login_req_buf,
-				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-
+				isert_conn->login_req_buf,
+				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
 	if (ret) {
 		isert_err("login_req_dma mapping error: %d\n", ret);
 		isert_conn->login_req_dma = 0;
-		goto out_login_buf;
+		goto out_free_login_req_buf;
 	}
 
-	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
-				(void *)isert_conn->login_rsp_buf,
-				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+	isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
+	if (!isert_conn->login_rsp_buf) {
+		isert_err("Unable to allocate isert_conn->login_rspbuf\n");
+		goto out_unmap_login_req_buf;
+	}
+
+	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
+				isert_conn->login_rsp_buf,
+				ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
 	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
 	if (ret) {
 		isert_err("login_rsp_dma mapping error: %d\n", ret);
 		isert_conn->login_rsp_dma = 0;
-		goto out_req_dma_map;
+		goto out_free_login_rsp_buf;
 	}
 
 	return 0;
 
-out_req_dma_map:
+out_free_login_rsp_buf:
+	kfree(isert_conn->login_rsp_buf);
+out_unmap_login_req_buf:
 	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
-			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-out_login_buf:
-	kfree(isert_conn->login_buf);
+			    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+out_free_login_req_buf:
+	kfree(isert_conn->login_req_buf);
 	return ret;
 }
@@ -726,7 +707,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	if (ret)
 		goto out_conn_dev;
 
-	ret = isert_rdma_post_recvl(isert_conn);
+	ret = isert_login_post_recv(isert_conn);
 	if (ret)
 		goto out_conn_dev;
 
@@ -773,7 +754,7 @@ isert_connect_release(struct isert_conn *isert_conn)
 		ib_destroy_qp(isert_conn->qp);
 	}
 
-	if (isert_conn->login_buf)
+	if (isert_conn->login_req_buf)
 		isert_free_login_buf(isert_conn);
 
 	isert_device_put(device);
@@ -820,12 +801,30 @@ isert_put_conn(struct isert_conn *isert_conn)
 	kref_put(&isert_conn->kref, isert_release_kref);
 }
 
+static void
+isert_handle_unbound_conn(struct isert_conn *isert_conn)
+{
+	struct isert_np *isert_np = isert_conn->cm_id->context;
+
+	mutex_lock(&isert_np->mutex);
+	if (!list_empty(&isert_conn->node)) {
+		/*
+		 * This means iscsi doesn't know this connection
+		 * so schedule a cleanup ourselves
+		 */
+		list_del_init(&isert_conn->node);
+		isert_put_conn(isert_conn);
+		queue_work(isert_release_wq, &isert_conn->release_work);
+	}
+	mutex_unlock(&isert_np->mutex);
+}
+
 /**
  * isert_conn_terminate() - Initiate connection termination
  * @isert_conn: isert connection struct
  *
  * Notes:
- * In case the connection state is FULL_FEATURE, move state
+ * In case the connection state is BOUND, move state
  * to TEMINATING and start teardown sequence (rdma_disconnect).
  * In case the connection state is UP, complete flush as well.
 *
@@ -837,23 +836,16 @@ isert_conn_terminate(struct isert_conn *isert_conn)
 {
 	int err;
 
-	switch (isert_conn->state) {
-	case ISER_CONN_TERMINATING:
-		break;
-	case ISER_CONN_UP:
-	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
-		isert_info("Terminating conn %p state %d\n",
-			   isert_conn, isert_conn->state);
-		isert_conn->state = ISER_CONN_TERMINATING;
-		err = rdma_disconnect(isert_conn->cm_id);
-		if (err)
-			isert_warn("Failed rdma_disconnect isert_conn %p\n",
-				   isert_conn);
-		break;
-	default:
-		isert_warn("conn %p teminating in state %d\n",
-			   isert_conn, isert_conn->state);
-	}
+	if (isert_conn->state >= ISER_CONN_TERMINATING)
+		return;
+
+	isert_info("Terminating conn %p state %d\n",
+		   isert_conn, isert_conn->state);
+	isert_conn->state = ISER_CONN_TERMINATING;
+	err = rdma_disconnect(isert_conn->cm_id);
+	if (err)
+		isert_warn("Failed rdma_disconnect isert_conn %p\n",
+			   isert_conn);
 }
 
 static int
@@ -887,35 +879,27 @@ static int
 isert_disconnected_handler(struct rdma_cm_id *cma_id,
 			   enum rdma_cm_event_type event)
 {
-	struct isert_np *isert_np = cma_id->context;
-	struct isert_conn *isert_conn;
-	bool terminating = false;
-
-	if (isert_np->cm_id == cma_id)
-		return isert_np_cma_handler(cma_id->context, event);
-
-	isert_conn = cma_id->qp->qp_context;
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
 	mutex_lock(&isert_conn->mutex);
-	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
-	isert_conn_terminate(isert_conn);
-	mutex_unlock(&isert_conn->mutex);
-
-	isert_info("conn %p completing wait\n", isert_conn);
-	complete(&isert_conn->wait);
-
-	if (terminating)
-		goto out;
-
-	mutex_lock(&isert_np->mutex);
-	if (!list_empty(&isert_conn->node)) {
-		list_del_init(&isert_conn->node);
-		isert_put_conn(isert_conn);
-		queue_work(isert_release_wq, &isert_conn->release_work);
+	switch (isert_conn->state) {
+	case ISER_CONN_TERMINATING:
+		break;
+	case ISER_CONN_UP:
+		isert_conn_terminate(isert_conn);
+		isert_wait4flush(isert_conn);
+		isert_handle_unbound_conn(isert_conn);
+		break;
+	case ISER_CONN_BOUND:
+	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+		break;
+	default:
+		isert_warn("conn %p teminating in state %d\n",
+			   isert_conn, isert_conn->state);
 	}
-	mutex_unlock(&isert_np->mutex);
+	mutex_unlock(&isert_conn->mutex);
 
-out:
 	return 0;
 }
@@ -934,12 +918,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
 static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
+	struct isert_np *isert_np = cma_id->context;
 	int ret = 0;
 
 	isert_info("%s (%d): status %d id %p np %p\n",
 		   rdma_event_msg(event->event), event->event,
 		   event->status, cma_id, cma_id->context);
 
+	if (isert_np->cm_id == cma_id)
+		return isert_np_cma_handler(cma_id->context, event->event);
+
 	switch (event->event) {
 	case RDMA_CM_EVENT_CONNECT_REQUEST:
 		ret = isert_connect_request(cma_id, event);
@@ -977,7 +965,8 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
 
 	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
 		rx_desc = &isert_conn->rx_descs[i];
-		rx_wr->wr_id = (uintptr_t)rx_desc;
+
+		rx_wr->wr_cqe = &rx_desc->rx_cqe;
 		rx_wr->sg_list = &rx_desc->rx_sg;
 		rx_wr->num_sge = 1;
 		rx_wr->next = rx_wr + 1;
@@ -985,13 +974,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
 	rx_wr--;
 	rx_wr->next = NULL; /* mark end of work requests list */
 
-	isert_conn->post_recv_buf_count += count;
 	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
 			   &rx_wr_failed);
-	if (ret) {
+	if (ret)
 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
-		isert_conn->post_recv_buf_count -= count;
-	}
 
 	return ret;
 }
@@ -1002,23 +988,20 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
 	struct ib_recv_wr *rx_wr_failed, rx_wr;
 	int ret;
 
-	rx_wr.wr_id = (uintptr_t)rx_desc;
+	rx_wr.wr_cqe = &rx_desc->rx_cqe;
 	rx_wr.sg_list = &rx_desc->rx_sg;
 	rx_wr.num_sge = 1;
 	rx_wr.next = NULL;
 
-	isert_conn->post_recv_buf_count++;
 	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
-	if (ret) {
+	if (ret)
 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
-		isert_conn->post_recv_buf_count--;
-	}
 
 	return ret;
 }
 
 static int
-isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
+isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
 {
 	struct ib_device *ib_dev = isert_conn->cm_id->device;
 	struct ib_send_wr send_wr, *send_wr_failed;
@@ -1027,8 +1010,10 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
 	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
 				      ISER_HEADERS_LEN, DMA_TO_DEVICE);
 
+	tx_desc->tx_cqe.done = isert_login_send_done;
+
 	send_wr.next = NULL;
-	send_wr.wr_id = (uintptr_t)tx_desc;
+	send_wr.wr_cqe = &tx_desc->tx_cqe;
 	send_wr.sg_list = tx_desc->tx_sg;
 	send_wr.num_sge = tx_desc->num_sge;
 	send_wr.opcode = IB_WR_SEND;
@@ -1056,7 +1041,6 @@ isert_create_send_desc(struct isert_conn *isert_conn,
 	tx_desc->iser_header.flags = ISCSI_CTRL;
 
 	tx_desc->num_sge = 1;
-	tx_desc->isert_cmd = isert_cmd;
 
 	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
 		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
@@ -1097,8 +1081,9 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 {
 	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
 
-	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
-	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
+	isert_cmd->iser_ib_op = ISER_IB_SEND;
+	tx_desc->tx_cqe.done = isert_send_done;
+	send_wr->wr_cqe = &tx_desc->tx_cqe;
 
 	if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
 		send_wr->opcode  = IB_WR_SEND_WITH_INV;
@@ -1113,7 +1098,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 }
 
 static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn)
+isert_login_post_recv(struct isert_conn *isert_conn)
 {
 	struct ib_recv_wr rx_wr, *rx_wr_fail;
 	struct ib_sge sge;
@@ -1121,23 +1106,22 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
 	memset(&sge, 0, sizeof(struct ib_sge));
 	sge.addr = isert_conn->login_req_dma;
-	sge.length = ISER_RX_LOGIN_SIZE;
+	sge.length = ISER_RX_PAYLOAD_SIZE;
 	sge.lkey = isert_conn->device->pd->local_dma_lkey;
 
 	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
 		sge.addr, sge.length, sge.lkey);
 
+	isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
+
 	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
-	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
+	rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
 	rx_wr.sg_list = &sge;
 	rx_wr.num_sge = 1;
 
-	isert_conn->post_recv_buf_count++;
 	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
-	if (ret) {
+	if (ret)
 		isert_err("ib_post_recv() failed: %d\n", ret);
-		isert_conn->post_recv_buf_count--;
-	}
 
 	return ret;
 }
@@ -1203,12 +1187,12 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 			goto post_send;
 		}
 
-		ret = isert_rdma_post_recvl(isert_conn);
+		ret = isert_login_post_recv(isert_conn);
 		if (ret)
 			return ret;
 	}
 
 post_send:
-	ret = isert_post_send(isert_conn, tx_desc);
+	ret = isert_login_post_send(isert_conn, tx_desc);
 	if (ret)
 		return ret;
 
@@ -1218,7 +1202,7 @@ post_send:
 static void
 isert_rx_login_req(struct isert_conn *isert_conn)
 {
-	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+	struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
 	int rx_buflen = isert_conn->login_req_len;
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct iscsi_login *login = conn->conn_login;
@@ -1551,12 +1535,42 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 }
 
 static void
-isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
+isert_print_wc(struct ib_wc *wc, const char *type)
+{
+	if (wc->status != IB_WC_WR_FLUSH_ERR)
+		isert_err("%s failure: %s (%d) vend_err %x\n", type,
+			  ib_wc_status_msg(wc->status), wc->status,
+			  wc->vendor_err);
+	else
+		isert_dbg("%s failure: %s (%d)\n", type,
+			  ib_wc_status_msg(wc->status), wc->status);
+}
+
+static void
+isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+	struct isert_conn *isert_conn = wc->qp->qp_context;
+	struct ib_device *ib_dev = isert_conn->cm_id->device;
+	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
+	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
 	struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
 	uint64_t read_va = 0, write_va = 0;
 	uint32_t read_stag = 0, write_stag = 0;
 
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		isert_print_wc(wc, "recv");
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+		return;
+	}
+
+	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
+			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+
+	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+		 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
+		 (int)(wc->byte_len - ISER_HEADERS_LEN));
+
 	switch (iser_ctrl->flags & 0xF0) {
 	case ISCSI_CTRL:
 		if (iser_ctrl->flags & ISER_RSV) {
@@ -1584,56 +1598,40 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
 
 	isert_rx_opcode(isert_conn, rx_desc,
 			read_stag, read_va, write_stag, write_va);
+
+	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
+			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 }
 
 static void
-isert_rcv_completion(struct iser_rx_desc *desc,
-		     struct isert_conn *isert_conn,
-		     u32 xfer_len)
+isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+	struct isert_conn *isert_conn = wc->qp->qp_context;
 	struct ib_device *ib_dev = isert_conn->cm_id->device;
-	struct iscsi_hdr *hdr;
-	u64 rx_dma;
-	int rx_buflen;
-
-	if ((char *)desc == isert_conn->login_req_buf) {
-		rx_dma = isert_conn->login_req_dma;
-		rx_buflen = ISER_RX_LOGIN_SIZE;
-		isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
-			 rx_dma, rx_buflen);
-	} else {
-		rx_dma = desc->dma_addr;
-		rx_buflen = ISER_RX_PAYLOAD_SIZE;
-		isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
-			 rx_dma, rx_buflen);
+
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		isert_print_wc(wc, "login recv");
+		return;
 	}
 
-	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
+			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 
-	hdr = &desc->iscsi_header;
-	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
-		 hdr->opcode, hdr->itt, hdr->flags,
-		 (int)(xfer_len - ISER_HEADERS_LEN));
+	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
 
-	if ((char *)desc == isert_conn->login_req_buf) {
-		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
-		if (isert_conn->conn) {
-			struct iscsi_login *login = isert_conn->conn->conn_login;
+	if (isert_conn->conn) {
+		struct iscsi_login *login = isert_conn->conn->conn_login;
 
-			if (login && !login->first_request)
-				isert_rx_login_req(isert_conn);
-		}
-		mutex_lock(&isert_conn->mutex);
-		complete(&isert_conn->login_req_comp);
-		mutex_unlock(&isert_conn->mutex);
-	} else {
-		isert_rx_do_work(desc, isert_conn);
+		if (login && !login->first_request)
+			isert_rx_login_req(isert_conn);
 	}
 
-	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
-				      DMA_FROM_DEVICE);
+	mutex_lock(&isert_conn->mutex);
+	complete(&isert_conn->login_req_comp);
+	mutex_unlock(&isert_conn->mutex);
 
-	isert_conn->post_recv_buf_count--;
+	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
+				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 }
 
 static int
@@ -1683,54 +1681,50 @@ isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-
 	isert_dbg("Cmd %p\n", isert_cmd);
 
-	if (wr->data.sg) {
+	if (isert_cmd->data.sg) {
 		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
-		isert_unmap_data_buf(isert_conn, &wr->data);
+		isert_unmap_data_buf(isert_conn, &isert_cmd->data);
 	}
 
-	if (wr->rdma_wr) {
+	if (isert_cmd->rdma_wr) {
 		isert_dbg("Cmd %p free send_wr\n", isert_cmd);
-		kfree(wr->rdma_wr);
-		wr->rdma_wr = NULL;
+		kfree(isert_cmd->rdma_wr);
+		isert_cmd->rdma_wr = NULL;
 	}
 
-	if (wr->ib_sge) {
+	if (isert_cmd->ib_sge) {
 		isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
-		kfree(wr->ib_sge);
-		wr->ib_sge = NULL;
+		kfree(isert_cmd->ib_sge);
+		isert_cmd->ib_sge = NULL;
 	}
 }
 
 static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-
 	isert_dbg("Cmd %p\n", isert_cmd);
 
-	if (wr->fr_desc) {
-		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
-		if (wr->fr_desc->ind & ISERT_PROTECTED) {
-			isert_unmap_data_buf(isert_conn, &wr->prot);
-			wr->fr_desc->ind &= ~ISERT_PROTECTED;
+	if (isert_cmd->fr_desc) {
+		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc);
+		if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
+			isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
+			isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
 		}
 		spin_lock_bh(&isert_conn->pool_lock);
-		list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
+		list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool);
 		spin_unlock_bh(&isert_conn->pool_lock);
-		wr->fr_desc = NULL;
+		isert_cmd->fr_desc = NULL;
 	}
 
-	if (wr->data.sg) {
+	if (isert_cmd->data.sg) {
 		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
-		isert_unmap_data_buf(isert_conn, &wr->data);
+		isert_unmap_data_buf(isert_conn, &isert_cmd->data);
 	}
 
-	wr->ib_sge = NULL;
-	wr->rdma_wr = NULL;
+	isert_cmd->ib_sge = NULL;
+	isert_cmd->rdma_wr = NULL;
 }
 
 static void
@@ -1882,52 +1876,70 @@ fail_mr_status:
 }
 
 static void
-isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
-			    struct isert_cmd *isert_cmd)
+isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct isert_conn *isert_conn = isert_cmd->conn;
+	struct isert_conn *isert_conn = wc->qp->qp_context;
 	struct isert_device *device = isert_conn->device;
+	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
+	struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
 	int ret = 0;
 
-	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
-		ret = isert_check_pi_status(se_cmd,
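A recurring mechanical change in the ib_isert.c hunks above is the conversion to the then-new RDMA CQ API: ib_create_cq()/ib_req_notify_cq() plus a driver-private work struct become a single ib_alloc_cq() call with IB_POLL_WORKQUEUE, and the old (uintptr_t)wr_id cast becomes an embedded struct ib_cqe with a per-WR done callback, recovered via container_of() (what cqe_to_rx_desc() does above). Distilled into a standalone sketch using only calls that appear in the diff; the example_* names are placeholders:

```c
#include <rdma/ib_verbs.h>

/* Descriptor embedding an ib_cqe, like iser_rx_desc above. */
struct example_rx_desc {
	struct ib_cqe	rx_cqe;
	struct ib_sge	rx_sg;
};

static void example_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* wr_cqe replaces the old wr_id cast: recover the descriptor
	 * from the embedded cqe. */
	struct example_rx_desc *desc =
		container_of(wc->wr_cqe, struct example_rx_desc, rx_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		return;		/* flush errors: connection teardown path */

	/* process wc->byte_len bytes described by desc->rx_sg ... */
	(void)desc;
}

static int example_post_recv(struct ib_qp *qp, struct example_rx_desc *desc)
{
	struct ib_recv_wr wr = {}, *bad_wr;

	desc->rx_cqe.done = example_recv_done;	/* per-WR completion handler */
	wr.wr_cqe = &desc->rx_cqe;
	wr.sg_list = &desc->rx_sg;
	wr.num_sge = 1;

	return ib_post_recv(qp, &wr, &bad_wr);
}

/* One ib_alloc_cq() call replaces ib_create_cq() + ib_req_notify_cq() +
 * the private completion work struct; teardown becomes ib_free_cq(). */
static struct ib_cq *example_alloc_cq(struct ib_device *dev, void *ctx,
				      int nr_cqe, int comp_vector)
{
	return ib_alloc_cq(dev, ctx, nr_cqe, comp_vector, IB_POLL_WORKQUEUE);
}
```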
