author     Linus Torvalds <torvalds@linux-foundation.org>   2017-05-01 11:22:35 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-05-01 11:22:35 -0700
commit     08c521a2011ff492490aa9ed6cc574be4235ce2b (patch)
tree       505d3ccd330b35c6f1e55065c28770e8e15128c6
parent     694752922b12bd318aa80191bd9d8c3dcfb39055 (diff)
parent     b06e13c38dbd5a03e945ce711f6909c91888f507 (diff)
Merge branch 'for-4.12/post-merge' of git://git.kernel.dk/linux-block
Pull second round of block layer updates from Jens Axboe:

 - Further fixups to the NVMe APST code, from Andy.

 - Various fixes for (mostly) nvme-fc, from Christoph and James.

 - NVMe scsi fixes from Jon and Christoph.

* 'for-4.12/post-merge' of git://git.kernel.dk/linux-block: (39 commits)
  nvme-scsi: remove nvme_trans_security_protocol
  nvme-lightnvm: add missing endianess conversion in nvme_nvm_end_io
  nvme-scsi: Consider LBA format in IO splitting calculation
  nvme-fc: avoid memory corruption caused by calling nvmf_free_options() twice
  lpfc: Fix memory corruption of the lpfc_ncmd->list pointers
  nvme: Add nvme_core.force_apst to ignore the NO_APST quirk
  nvme: Display raw APST configuration via DYNAMIC_DEBUG
  nvme: Fix APST comment
  lpfc revison 11.2.0.12
  Fix Express lane queue creation.
  Update ABORT processing for NVMET.
  Fix implicit logo and RSCN handling for NVMET
  Add Fabric assigned WWN support.
  Fix max_sgl_segments settings for NVME / NVMET
  Fix crash after issuing lip reset
  Fix driver load issues when MRQ=8
  Remove hba lock from NVMET issue WQE.
  Fix nvme initiator handling when not enabled.
  Fix driver usage of 128B WQEs when WQ_CREATE is V1.
  Fix driver unload/reload operation.
  ...
-rw-r--r--  drivers/nvme/host/core.c             38
-rw-r--r--  drivers/nvme/host/fc.c             1089
-rw-r--r--  drivers/nvme/host/lightnvm.c          2
-rw-r--r--  drivers/nvme/host/scsi.c             15
-rw-r--r--  drivers/nvme/target/fc.c              8
-rw-r--r--  drivers/nvme/target/fcloop.c          4
-rw-r--r--  drivers/scsi/lpfc/lpfc.h              5
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c        10
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c          4
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h         9
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c          68
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c     67
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h         1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c         68
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    133
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h           3
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h          4
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       202
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c         7
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c    8
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c       157
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h        11
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c      377
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h       14
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c         22
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h         2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h      2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c        3
28 files changed, 1612 insertions, 721 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index bf6729b1d8bf..d5e0906262ea 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -61,6 +61,10 @@ module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
"max power saving latency for new devices; use PM QOS to change per device");
+static bool force_apst;
+module_param(force_apst, bool, 0644);
+MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
+
static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);
@@ -1325,7 +1329,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
* heuristic: we are willing to spend at most 2% of the time
* transitioning between power states. Therefore, when running
* in any given state, we will enter the next lower-power
- * non-operational state after waiting 100 * (enlat + exlat)
+ * non-operational state after waiting 50 * (enlat + exlat)
* microseconds, as long as that state's total latency is under
* the requested maximum latency.
*
@@ -1336,6 +1340,8 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
unsigned apste;
struct nvme_feat_auto_pst *table;
+ u64 max_lat_us = 0;
+ int max_ps = -1;
int ret;
/*
@@ -1357,6 +1363,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
if (ctrl->ps_max_latency_us == 0) {
/* Turn off APST. */
apste = 0;
+ dev_dbg(ctrl->device, "APST disabled\n");
} else {
__le64 target = cpu_to_le64(0);
int state;
@@ -1406,9 +1413,22 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
target = cpu_to_le64((state << 3) |
(transition_ms << 8));
+
+ if (max_ps == -1)
+ max_ps = state;
+
+ if (total_latency_us > max_lat_us)
+ max_lat_us = total_latency_us;
}
apste = 1;
+
+ if (max_ps == -1) {
+ dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
+ } else {
+ dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
+ max_ps, max_lat_us, (int)sizeof(*table), table);
+ }
}
ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
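[Editor's note: the hunks above only show fragments of nvme_configure_apst(). As a hedged illustration of the arithmetic described in the corrected comment -- not the kernel source itself -- the "spend at most 2% of the time transitioning" rule amounts to a helper like the one below, where enlat_us/exlat_us are a power state's entry/exit latencies in microseconds and max_latency_us is the requested ceiling (nvme_core.default_ps_max_latency_us).]

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Hedged sketch, not the kernel code: idle time (in ms) to program
     * before dropping into a non-operational power state.  Wait
     * 50 * (enlat + exlat) microseconds, but only if the state's
     * round-trip latency fits under the requested maximum.
     * Returns 0 if the state is unusable under this limit.
     */
    static uint64_t apst_transition_ms(uint32_t enlat_us, uint32_t exlat_us,
                                       uint64_t max_latency_us)
    {
            uint64_t total_latency_us = (uint64_t)enlat_us + exlat_us;

            if (max_latency_us == 0 || total_latency_us > max_latency_us)
                    return 0;

            /* 50 * total_latency_us in us == total_latency_us / 20 in ms,
             * rounded up so shallow states still get a nonzero timer */
            return (total_latency_us + 19) / 20;
    }

    int main(void)
    {
            /* example: enlat = exlat = 1000us -> wait 100ms before entering */
            printf("%llu ms\n",
                   (unsigned long long)apst_transition_ms(1000, 1000, 25000));
            return 0;
    }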
@@ -1546,6 +1566,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
}
+ if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
+ dev_warn(ctrl->dev, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
+ ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
+ }
+
ctrl->oacs = le16_to_cpu(id->oacs);
ctrl->vid = le16_to_cpu(id->vid);
ctrl->oncs = le16_to_cpup(&id->oncs);
@@ -1568,7 +1593,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->npss = id->npss;
prev_apsta = ctrl->apsta;
- ctrl->apsta = (ctrl->quirks & NVME_QUIRK_NO_APST) ? 0 : id->apsta;
+ if (ctrl->quirks & NVME_QUIRK_NO_APST) {
+ if (force_apst && id->apsta) {
+ dev_warn(ctrl->dev, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
+ ctrl->apsta = 1;
+ } else {
+ ctrl->apsta = 0;
+ }
+ } else {
+ ctrl->apsta = id->apsta;
+ }
memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
if (ctrl->ops->is_fabrics) {
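[Editor's note: summarizing the two force_apst hunks above, the interaction between the device's APSTA capability, the NO_APST quirk, and the new nvme_core.force_apst parameter reads as a small decision helper. This is a hedged sketch of the logic shown in the diff, not a copy of the kernel function.]

    #include <stdbool.h>

    /*
     * Hedged sketch: the controller only advertises working APST
     * (ctrl->apsta) if the device claims support (id->apsta) and either
     * the NO_APST quirk is absent or the user set nvme_core.force_apst
     * to override it.
     */
    bool apst_supported(bool id_apsta, bool quirk_no_apst, bool force_apst)
    {
            if (!quirk_no_apst)
                    return id_apsta;        /* no quirk: trust the device */
            if (force_apst && id_apsta)
                    return true;            /* quirked off, but user overrode it */
            return false;                   /* quirked off and not overridden */
    }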
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index ecc1048de837..4976db56e351 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -19,6 +19,7 @@
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
+#include <linux/delay.h>
#include "nvme.h"
#include "fabrics.h"
@@ -44,6 +45,8 @@ enum nvme_fc_queue_flags {
#define NVMEFC_QUEUE_DELAY 3 /* ms units */
+#define NVME_FC_MAX_CONNECT_ATTEMPTS 1
+
struct nvme_fc_queue {
struct nvme_fc_ctrl *ctrl;
struct device *dev;
@@ -65,6 +68,7 @@ enum nvme_fcop_flags {
FCOP_FLAGS_TERMIO = (1 << 0),
FCOP_FLAGS_RELEASED = (1 << 1),
FCOP_FLAGS_COMPLETE = (1 << 2),
+ FCOP_FLAGS_AEN = (1 << 3),
};
struct nvmefc_ls_req_op {
@@ -86,6 +90,7 @@ enum nvme_fcpop_state {
FCPOP_STATE_IDLE = 1,
FCPOP_STATE_ACTIVE = 2,
FCPOP_STATE_ABORTED = 3,
+ FCPOP_STATE_COMPLETE = 4,
};
struct nvme_fc_fcp_op {
@@ -104,6 +109,7 @@ struct nvme_fc_fcp_op {
struct request *rq;
atomic_t state;
+ u32 flags;
u32 rqno;
u32 nents;
@@ -134,19 +140,17 @@ struct nvme_fc_rport {
struct kref ref;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
-enum nvme_fcctrl_state {
- FCCTRL_INIT = 0,
- FCCTRL_ACTIVE = 1,
+enum nvme_fcctrl_flags {
+ FCCTRL_TERMIO = (1 << 0),
};
struct nvme_fc_ctrl {
spinlock_t lock;
struct nvme_fc_queue *queues;
- u32 queue_count;
-
struct device *dev;
struct nvme_fc_lport *lport;
struct nvme_fc_rport *rport;
+ u32 queue_count;
u32 cnum;
u64 association_id;
@@ -159,8 +163,14 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set tag_set;
struct work_struct delete_work;
+ struct work_struct reset_work;
+ struct delayed_work connect_work;
+ int reconnect_delay;
+ int connect_attempts;
+
struct kref ref;
- int state;
+ u32 flags;
+ u32 iocnt;
struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
@@ -1132,6 +1142,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
/* *********************** NVME Ctrl Routines **************************** */
+static void __nvme_fc_final_op_cleanup(struct request *rq);
static int
nvme_fc_reinit_request(void *data, struct request *rq)
@@ -1169,21 +1180,84 @@ nvme_fc_exit_request(void *data, struct request *rq,
return __nvme_fc_exit_request(data, op);
}
+static int
+__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
+{
+ int state;
+
+ state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+ if (state != FCPOP_STATE_ACTIVE) {
+ atomic_set(&op->state, state);
+ return -ECANCELED;
+ }
+
+ ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
+ &ctrl->rport->remoteport,
+ op->queue->lldd_handle,
+ &op->fcp_req);
+
+ return 0;
+}
+
static void
-nvme_fc_exit_aen_ops(struct nvme_fc_ctrl *ctrl)
+nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
- int i;
+ unsigned long flags;
+ int i, ret;
for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
- if (atomic_read(&aen_op->state) == FCPOP_STATE_UNINIT)
+ if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
continue;
- __nvme_fc_exit_request(ctrl, aen_op);
- nvme_fc_ctrl_put(ctrl);
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (ctrl->flags & FCCTRL_TERMIO) {
+ ctrl->iocnt++;
+ aen_op->flags |= FCOP_FLAGS_TERMIO;
+ }
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ ret = __nvme_fc_abort_op(ctrl, aen_op);
+ if (ret) {
+ /*
+ * if __nvme_fc_abort_op failed the io wasn't
+ * active. Thus this call path is running in
+ * parallel to the io complete. Treat as non-error.
+ */
+
+ /* back out the flags/counters */
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (ctrl->flags & FCCTRL_TERMIO)
+ ctrl->iocnt--;
+ aen_op->flags &= ~FCOP_FLAGS_TERMIO;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ return;
+ }
+ }
+}
+
+static inline int
+__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_fcp_op *op)
+{
+ unsigned long flags;
+ bool complete_rq = false;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+ if (ctrl->flags & FCCTRL_TERMIO)
+ ctrl->iocnt--;
}
+ if (op->flags & FCOP_FLAGS_RELEASED)
+ complete_rq = true;
+ else
+ op->flags |= FCOP_FLAGS_COMPLETE;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ return complete_rq;
}
-void
+static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
@@ -1192,8 +1266,10 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
struct nvme_fc_ctrl *ctrl = op->ctrl;
struct nvme_fc_queue *queue = op->queue;
struct nvme_completion *cqe = &op->rsp_iu.cqe;
+ struct nvme_command *sqe = &op->cmd_iu.sqe;
__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
union nvme_result result;
+ bool complete_rq;
/*
* WARNING:
@@ -1274,7 +1350,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
be32_to_cpu(op->rsp_iu.xfrd_len) !=
freq->transferred_length ||
op->rsp_iu.status_code ||
- op->rqno != le16_to_cpu(cqe->command_id))) {
+ sqe->common.command_id != cqe->command_id)) {
status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
goto done;
}
@@ -1288,13 +1364,25 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
}
done:
- if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) {
+ if (op->flags & FCOP_FLAGS_AEN) {
nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
+ complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
+ atomic_set(&op->state, FCPOP_STATE_IDLE);
+ op->flags = FCOP_FLAGS_AEN; /* clear other flags */
nvme_fc_ctrl_put(ctrl);
return;
}
- nvme_end_request(rq, status, result);
+ complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
+ if (!complete_rq) {
+ if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+ status = cpu_to_le16(NVME_SC_ABORT_REQ);
+ if (blk_queue_dying(rq->q))
+ status |= cpu_to_le16(NVME_SC_DNR);
+ }
+ nvme_end_request(rq, status, result);
+ } else
+ __nvme_fc_final_op_cleanup(rq);
}
static int
@@ -1375,25 +1463,55 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_fcp_op *aen_op;
struct nvme_fc_cmd_iu *cmdiu;
struct nvme_command *sqe;
+ void *private;
int i, ret;
aen_op = ctrl->aen_ops;
for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
+ GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
cmdiu = &aen_op->cmd_iu;
sqe = &cmdiu->sqe;
ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
aen_op, (struct request *)NULL,
(AEN_CMDID_BASE + i));
- if (ret)
+ if (ret) {
+ kfree(private);
return ret;
+ }
+
+ aen_op->flags = FCOP_FLAGS_AEN;
+ aen_op->fcp_req.first_sgl = NULL; /* no sg list */
+ aen_op->fcp_req.private = private;
memset(sqe, 0, sizeof(*sqe));
sqe->common.opcode = nvme_admin_async_event;
+ /* Note: core layer may overwrite the sqe.command_id value */
sqe->common.command_id = AEN_CMDID_BASE + i;
}
return 0;
}
+static void
+nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_fcp_op *aen_op;
+ int i;
+
+ aen_op = ctrl->aen_ops;
+ for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ if (!aen_op->fcp_req.private)
+ continue;
+
+ __nvme_fc_exit_request(ctrl, aen_op);
+
+ kfree(aen_op->fcp_req.private);
+ aen_op->fcp_req.private = NULL;
+ }
+}
static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
@@ -1493,15 +1611,6 @@ __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
}
static void
-nvme_fc_destroy_admin_queue(struct nvme_fc_ctrl *ctrl)
-{
- __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
- nvme_fc_free_queue(&ctrl->queues[0]);
-}
-
-static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
int i;
@@ -1588,19 +1697,27 @@ nvme_fc_ctrl_free(struct kref *ref)
container_of(ref, struct nvme_fc_ctrl, ref);
unsigned long flags;
- if (ctrl->state != FCCTRL_INIT) {
- /* remove from rport list */
- spin_lock_irqsave(&ctrl->rport->lock, flags);
- list_del(&ctrl->ctrl_list);
- spin_unlock_irqrestore(&ctrl->rport->lock, flags);
+ if (ctrl->ctrl.tagset) {
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
}
+ /* remove from rport list */
+ spin_lock_irqsave(&ctrl->rport->lock, flags);
+ list_del(&ctrl->ctrl_list);
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
+
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+
+ kfree(ctrl->queues);
+
put_device(ctrl->dev);
nvme_fc_rport_put(ctrl->rport);
- kfree(ctrl->queues);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
- nvmf_free_options(ctrl->ctrl.opts);
+ if (ctrl->ctrl.opts)
+ nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
}
@@ -1621,57 +1738,38 @@ nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
* controller. Called after last nvme_put_ctrl() call
*/
static void
-nvme_fc_free_nvme_ctrl(struct nvme_ctrl *nctrl)
+nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
WARN_ON(nctrl != &ctrl->ctrl);
- /*
- * Tear down the association, which will generate link
- * traffic to terminate connections
- */
-
- if (ctrl->state != FCCTRL_INIT) {
- /* send a Disconnect(association) LS to fc-nvme target */
- nvme_fc_xmt_disconnect_assoc(ctrl);
-
- if (ctrl->ctrl.tagset) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(&ctrl->tag_set);
- nvme_fc_delete_hw_io_queues(ctrl);
- nvme_fc_free_io_queues(ctrl);
- }
-
- nvme_fc_exit_aen_ops(ctrl);
-
- nvme_fc_destroy_admin_queue(ctrl);
- }
-
nvme_fc_ctrl_put(ctrl);
}
-
-static int
-__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
+static void
+nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
- int state;
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: transport association error detected: %s\n",
+ ctrl->cnum, errmsg);
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
- state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
- if (state != FCPOP_STATE_ACTIVE) {
- atomic_set(&op->state, state);
- return -ECANCELED; /* fail */
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: error_recovery: Couldn't change state "
+ "to RECONNECTING\n", ctrl->cnum);
+ return;
}
- ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
- &ctrl->rport->remoteport,
- op->queue->lldd_handle,
- &op->fcp_req);
-
- return 0;
+ if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: error_recovery: Failed to schedule "
+ "reset work\n", ctrl->cnum);
}
-enum blk_eh_timer_return
+static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
@@ -1687,11 +1785,13 @@ nvme_fc_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
/*
- * TODO: force a controller reset
- * when that happens, queues will be torn down and outstanding
- * ios will be terminated, and the above abort, on a single io
- * will no longer be needed.
+ * we can't individually ABTS an io without affecting the queue,
+ * thus killing the queue, and thus the association.
+ * So resolve by performing a controller reset, which will stop
+ * the host/io stack, terminate the association on the link,
+ * and recreate an association on the link.
*/
+ nvme_fc_error_recovery(ctrl, "io timeout error");
return BLK_EH_HANDLED;
}
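[Editor's note: the timeout and terminate paths in this patch all funnel through __nvme_fc_abort_op(), added earlier in this file, which uses an atomic exchange so that only an io still in the ACTIVE state is handed to the LLDD's abort handler. Below is a hedged, standalone sketch of that claim-or-back-off pattern using C11 atomics instead of the kernel's atomic_t; it is an illustration, not the transport code.]

    #include <stdatomic.h>
    #include <stdbool.h>

    enum op_state { OP_UNINIT, OP_IDLE, OP_ACTIVE, OP_ABORTED, OP_COMPLETE };

    /*
     * Hedged sketch: atomically claim the ACTIVE -> ABORTED transition.
     * If the op was in any other state, the prior value is restored and
     * the caller treats the abort as a no-op, because the io is
     * completing (or has completed) in parallel.
     */
    bool try_claim_abort(atomic_int *state)
    {
            int prev = atomic_exchange(state, OP_ABORTED);

            if (prev != OP_ACTIVE) {
                    atomic_store(state, prev);  /* back out, io not active */
                    return false;
            }
            return true;    /* caller may now ask the LLDD to abort the exchange */
    }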
@@ -1785,6 +1885,13 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
u32 csn;
int ret;
+ /*
+ * before attempting to send the io, check to see if we believe
+ * the target device is present
+ */
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ return BLK_MQ_RQ_QUEUE_ERROR;
+
if (!nvme_fc_ctrl_get(ctrl))
return BLK_MQ_RQ_QUEUE_ERROR;
@@ -1829,14 +1936,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
sqe->rw.dptr.sgl.addr = 0;
- /* odd that we set the command_id - should come from nvme-fabrics */
- WARN_ON_ONCE(sqe->common.command_id != cpu_to_le16(op->rqno));
-
- if (op->rq) { /* skipped on aens */
+ if (!(op->flags & FCOP_FLAGS_AEN)) {
ret = nvme_fc_map_data(ctrl, op->rq, op);
if (ret < 0) {
- dev_err(queue->ctrl->ctrl.device,
- "Failed to map data (%d)\n", ret);
nvme_cleanup_cmd(op->rq);
nvme_fc_ctrl_put(ctrl);
return (ret == -ENOMEM || ret == -EAGAIN) ?
@@ -1849,7 +1951,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
atomic_set(&op->state, FCPOP_STATE_ACTIVE);
- if (op->rq)
+ if (!(op->flags & FCOP_FLAGS_AEN))
blk_mq_start_request(op->rq);
ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
@@ -1857,9 +1959,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
queue->lldd_handle, &op->fcp_req);
if (ret) {
- dev_err(ctrl->dev,
- "Send nvme command failed - lldd returned %d.\n", ret);
-
if (op->rq) { /* normal request */
nvme_fc_unmap_data(ctrl, op->rq, op);
nvme_cleanup_cmd(op->rq);
@@ -1929,12 +2028,8 @@ nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
struct nvme_fc_fcp_op *op;
req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
- if (!req) {
- dev_err(queue->ctrl->ctrl.device,
- "tag 0x%x on QNum %#x not found\n",
- tag, queue->qnum);
+ if (!req)
return 0;
- }
op = blk_mq_rq_to_pdu(req);
@@ -1951,11 +2046,21 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
struct nvme_fc_fcp_op *aen_op;
+ unsigned long flags;
+ bool terminating = false;
int ret;
if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
return;
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (ctrl->flags & FCCTRL_TERMIO)
+ terminating = true;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ if (terminating)
+ return;
+
aen_op = &ctrl->aen_ops[aer_idx];
ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
@@ -1966,13 +2071,14 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
}
static void
-nvme_fc_complete_rq(struct request *rq)
+__nvme_fc_final_op_cleanup(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
- int state;
- state = atomic_xchg(&op->state, FCPOP_STATE_IDLE);
+ atomic_set(&op->state, FCPOP_STATE_IDLE);
+ op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
+ FCOP_FLAGS_COMPLETE);
nvme_cleanup_cmd(rq);
nvme_fc_unmap_data(ctrl, rq, op);
@@ -1981,6 +2087,84 @@ nvme_fc_complete_rq(struct request *rq)
}
+static void
+nvme_fc_complete_rq(struct request *rq)
+{
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fc_ctrl *ctrl = op->ctrl;
+ unsigned long flags;
+ bool completed = false;
+
+ /*
+ * the core layer, on controller resets after calling
+ * nvme_shutdown_ctrl(), calls complete_rq without our
+ * calling blk_mq_complete_request(), thus there may still
+ * be live i/o outstanding with the LLDD. Means transport has
+ * to track complete calls vs fcpio_done calls to know what
+ * path to take on completes and dones.
+ */
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (op->flags & FCOP_FLAGS_COMPLETE)
+ completed = true;
+ else
+ op->flags |= FCOP_FLAGS_RELEASED;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ if (completed)
+ __nvme_fc_final_op_cleanup(rq);
+}
+
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
+ * this routine to kill them on a 1 by 1 basis.
+ *
+ * As FC allocates FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static void
+nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+{
+ struct nvme_ctrl *nctrl = data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+ unsigned long flags;
+ int status;
+
+ if (!blk_mq_request_started(req))
+ return;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (ctrl->flags & FCCTRL_TERMIO) {
+ ctrl->iocnt++;
+ op->flags |= FCOP_FLAGS_TERMIO;
+ }
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ status = __nvme_fc_abort_op(ctrl, op);
+ if (status) {
+ /*
+ * if __nvme_fc_abort_op failed the io wasn't
+ * active. Thus this call path is running in
+ * parallel to the io complete. Treat as non-error.
+ */
+
+ /* back out the flags/counters */
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (ctrl->flags & FCCTRL_TERMIO)
+ ctrl->iocnt--;
+ op->flags &= ~FCOP_FLAGS_TERMIO;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ return;
+ }
+}
+
+
static const struct blk_mq_ops nvme_fc_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
.complete = nvme_fc_complete_rq,
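[Editor's note: the comment in nvme_fc_complete_rq() above describes a handshake between two racing paths -- the transport's done callback and the block layer's complete_rq -- where whichever path runs second performs the final op cleanup. The following is a hedged sketch of that two-flag pattern using pthreads and plain bools, independent of the kernel types and locks; the lock is assumed to be initialized with pthread_mutex_init().]

    #include <stdbool.h>
    #include <pthread.h>

    /*
     * Hedged sketch of the FCOP_FLAGS_RELEASED / FCOP_FLAGS_COMPLETE
     * handshake: each path sets its own flag under the lock; if the
     * other path's flag is already set, this path arrived second and
     * must run the final cleanup.
     */
    struct op_teardown {
            pthread_mutex_t lock;
            bool released;          /* block layer called complete_rq */
            bool completed;         /* LLDD done callback ran */
    };

    /* done-callback path: returns true if it should do the final cleanup */
    bool done_path(struct op_teardown *op)
    {
            bool cleanup;

            pthread_mutex_lock(&op->lock);
            cleanup = op->released;         /* release already happened? */
            if (!cleanup)
                    op->completed = true;   /* let the release path clean up */
            pthread_mutex_unlock(&op->lock);
            return cleanup;
    }

    /* complete_rq path: mirror image of done_path() */
    bool release_path(struct op_teardown *op)
    {
            bool cleanup;

            pthread_mutex_lock(&op->lock);
            cleanup = op->completed;
            if (!cleanup)
                    op->released = true;
            pthread_mutex_unlock(&op->lock);
            return cleanup;
    }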
@@ -1992,145 +2176,275 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
.timeout = nvme_fc_timeout,
};
-static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
- .queue_rq = nvme_fc_queue_rq,
- .complete = nvme_fc_complete_rq,
- .init_request = nvme_fc_init_admin_request,
- .exit_request = nvme_fc_exit_request,
- .reinit_request = nvme_fc_reinit_request,
- .init_hctx = nvme_fc_init_admin_hctx,
- .timeout = nvme_fc_timeout,
-};
-
static int
-nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
+nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
- u32 segs;
- int error;
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ int ret;
- nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
+ ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "set_queue_count failed: %d\n", ret);
+ return ret;
+ }
- error = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
- NVME_FC_AQ_BLKMQ_DEPTH,
- (NVME_FC_AQ_BLKMQ_DEPTH / 4));
- if (error)
- return error;
+ ctrl->queue_count = opts->nr_io_queues + 1;
+ if (!opts->nr_io_queues)
+ return 0;
- memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
- ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
- ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
- ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
- ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
+ dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
+ opts->nr_io_queues);
+
+ nvme_fc_init_io_queues(ctrl);
+
+ memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+ ctrl->tag_set.ops = &nvme_fc_mq_ops;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
+ ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+ ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
(SG_CHUNK_SIZE *
sizeof(struct scatterlist)) +
ctrl->lport->ops->fcprqst_priv_sz;
- ctrl->admin_tag_set.driver_data = ctrl;
- ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+ ctrl->tag_set.driver_data = ctrl;
+ ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+ ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
- error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
- if (error)
- goto out_free_queue;
+ ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ if (ret)
+ return ret;
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_free_tagset;
+ ctrl->ctrl.tagset = &ctrl->tag_set;
+
+ ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+ if (IS_ERR(ctrl->ctrl.connect_q)) {
+ ret = PTR_ERR(ctrl->ctrl.connect_q);
+ goto out_free_tag_set;
+ }
+
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+ if (ret)
+ goto out_cleanup_blk_queue;
+
+ ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+ if (ret)
+ goto out_delete_hw_queues;
+
+ return 0;
+
+out_delete_hw_queues:
+ nvme_fc_delete_hw_io_queues(ctrl);
+out_cleanup_blk_queue:
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tag_set:
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ nvme_fc_free_io_queues(ctrl);
+
+ /* force put free routine to ignore io queues */
+ ctrl->ctrl.tagset = NULL;
+
+ return ret;
+}
+
+static int
+nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ int ret;
+
+ ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "set_queue_count failed: %d\n", ret);
+ return ret;
}
- error = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
+ /* check for io queues existing */
+ if (ctrl->queue_count == 1)
+ return 0;
+
+ dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
+ opts->nr_io_queues);
+
+ nvme_fc_init_io_queues(ctrl);
+