author     Kernel Build Daemon <kbuild@suse.de>  2018-06-30 07:00:32 +0200
committer  Kernel Build Daemon <kbuild@suse.de>  2018-06-30 07:00:32 +0200
commit     03d2a463286b45d66a7003003d6aa7d83a02a7a9 (patch)
tree       24ed71890a03fec258a6e2374fb00730f3b50d4c
parent     b7a0022ff3663655be061f0a040cffb745128ae1 (diff)
parent     7573f5a733931ea6b5c7d3196a9fca3117ca0e13 (diff)
Merge branch 'SLE12-SP3' into SLE12-SP3-AZURE
-rw-r--r--  blacklist.conf                                                              |   1
-rw-r--r--  patches.drivers/nvme-allow-duplicate-controller-if-prior-controller-.patch  |  41
-rw-r--r--  patches.drivers/nvme-fabrics-allow-duplicate-connections-to-the-disc.patch  |  49
-rw-r--r--  patches.drivers/nvme-fabrics-allow-internal-passthrough-command-on-d.patch  | 130
-rw-r--r--  patches.drivers/nvme-fabrics-centralize-discovery-controller-default.patch  |  48
-rw-r--r--  patches.drivers/nvme-fabrics-fix-and-refine-state-checks-in-__nvmf_c.patch  |  88
-rw-r--r--  patches.drivers/nvme-fabrics-refactor-queue-ready-check.patch               | 212
-rw-r--r--  patches.drivers/nvme-fc-change-controllers-first-connect-to-use-reco.patch  | 265
-rw-r--r--  patches.drivers/nvme-fc-fix-nulling-of-queue-data-on-reconnect.patch        |  83
-rw-r--r--  patches.drivers/nvme-fc-remove-reinit_request-routine.patch                 |  77
-rw-r--r--  patches.drivers/nvme-fc-remove-setting-DNR-on-exception-conditions.patch    |  44
-rw-r--r--  patches.drivers/nvme-move-init-of-keep_alive-work-item-to-controller.patch  |  50
-rw-r--r--  patches.drivers/nvmet-fc-increase-LS-buffer-count-per-fc-port.patch         |  36
-rw-r--r--  patches.drivers/nvmet-switch-loopback-target-state-to-connecting-whe.patch  |  47
-rw-r--r--  patches.drivers/scsi-qedi-Fix-truncation-of-CHAP-name-and-secret.patch      |  80
-rw-r--r--  patches.fixes/nvme-expand-nvmf_check_if_ready-checks.patch                  | 123
-rw-r--r--  patches.kabi/nvme-reimplement-nvmf_check_if_ready-to-avoid-kabi-b.patch     |  53
-rw-r--r--  patches.suse/x86-spectre_v1-Disable-compiler-optimizations-over-a.patch     |  79
-rw-r--r--  patches.suse/x86-speculation-Fix-up-array_index_nospec_mask-asm-c.patch     |  33
-rw-r--r--  series.conf                                                                 |  19
20 files changed, 1532 insertions(+), 26 deletions(-)
diff --git a/blacklist.conf b/blacklist.conf
index 4652a1e283..363f6dbe6a 100644
--- a/blacklist.conf
+++ b/blacklist.conf
@@ -399,3 +399,4 @@ a86b06d1ccd218a6a50d6a3a88fbd2abcd0eaa94 # AFS is unsupported, backport non-triv
9b9322db5c5a1917a66c71fe47c3848a9a31227e # we reverted 657308f73e67
fbfcd0199170984bd3c2812e49ed0fe7b226959a # we don't care about alpha
fc218544fbc800d1c91348ec834cacfb257348f7 # requires major changes to libceph from 4.17
+16001c10725e11b73b8518f42e414506bf73c291 # Preliminary patches are missing
diff --git a/patches.drivers/nvme-allow-duplicate-controller-if-prior-controller-.patch b/patches.drivers/nvme-allow-duplicate-controller-if-prior-controller-.patch
new file mode 100644
index 0000000000..d82a798f11
--- /dev/null
+++ b/patches.drivers/nvme-allow-duplicate-controller-if-prior-controller-.patch
@@ -0,0 +1,41 @@
+From: James Smart <jsmart2021@gmail.com>
+Date: Fri, 25 May 2018 14:02:23 -0700
+Subject: [PATCH] nvme: allow duplicate controller if prior controller being
+ deleted
+References: bsc#1098527
+Git-commit: ab4f47a9f4a12603a1806230d44ead2e54158f85
+Patch-mainline: v4.18-rc1
+
+The current checks for whether a new controller request "matches" an
+existing controller ignores controller state and checks identity strings.
+There are cases where an existing controller may be in its last steps of
+deletion when they are "matched" by a new connection.
+
+Change the behavior so that the new connection ignores controllers that
+are deleted.
+
+Signed-off-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/fabrics.h | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
+index 812ee2609fc3..71c494bbaea2 100644
+--- a/drivers/nvme/host/fabrics.h
++++ b/drivers/nvme/host/fabrics.h
+@@ -141,7 +141,9 @@ static inline bool
+ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts)
+ {
+- if (strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
++ if (ctrl->state == NVME_CTRL_DELETING ||
++ ctrl->state == NVME_CTRL_DEAD ||
++ strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
+ strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
+ memcmp(&opts->host->id, &ctrl->opts->host->id, 16))
+ return false;
+--
+2.12.3
+
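For reference, here is how the matching helper reads once the hunk above is applied. This is a reconstruction from the diff context; the trailing return true is assumed from the pre-patch shape of the function and is not part of the hunk:

	static inline bool
	nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
			struct nvmf_ctrl_options *opts)
	{
		/* A controller in its final stages of deletion must never
		 * match a new connection request. */
		if (ctrl->state == NVME_CTRL_DELETING ||
		    ctrl->state == NVME_CTRL_DEAD ||
		    strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
		    strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
		    memcmp(&opts->host->id, &ctrl->opts->host->id, 16))
			return false;

		return true;	/* assumed from context, not in the hunk */
	}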
diff --git a/patches.drivers/nvme-fabrics-allow-duplicate-connections-to-the-disc.patch b/patches.drivers/nvme-fabrics-allow-duplicate-connections-to-the-disc.patch
new file mode 100644
index 0000000000..d0847d6ac1
--- /dev/null
+++ b/patches.drivers/nvme-fabrics-allow-duplicate-connections-to-the-disc.patch
@@ -0,0 +1,49 @@
+From: Hannes Reinecke <hare@suse.de>
+Date: Thu, 24 May 2018 16:18:17 +0200
+Subject: [PATCH] nvme-fabrics: allow duplicate connections to the discovery
+ controller
+References: bsc#1098527
+Git-commit: 181303d03525ea52d2d002fb8ee04e769aaa4ce4
+Patch-mainline: v4.18-rc1
+
+The whole point of the discovery controller is that it can accept
+multiple connections.
+
+[hare: ported to SLE12 SP3]
+
+Signed-off-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/fabrics.c | 1 +
+ drivers/nvme/host/fc.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 27ad2b4d1d32..f9adf660c9f0 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -860,6 +860,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ if (opts->discovery_nqn) {
+ opts->kato = 0;
+ opts->nr_io_queues = 0;
++ opts->duplicate_connect = true;
+ }
+ if (ctrl_loss_tmo < 0)
+ opts->max_reconnects = -1;
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 26a10fff6029..ee6245c33c45 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -3130,6 +3130,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ }
+
+ if (!opts->duplicate_connect &&
++ !opts->discovery_nqn &&
+ nvme_fc_existing_controller(rport, opts)) {
+ ret = -EALREADY;
+ goto out_fail;
+--
+2.12.3
+
diff --git a/patches.drivers/nvme-fabrics-allow-internal-passthrough-command-on-d.patch b/patches.drivers/nvme-fabrics-allow-internal-passthrough-command-on-d.patch
new file mode 100644
index 0000000000..d07c7406df
--- /dev/null
+++ b/patches.drivers/nvme-fabrics-allow-internal-passthrough-command-on-d.patch
@@ -0,0 +1,130 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Fri, 25 May 2018 15:41:54 +0200
+Subject: [PATCH] nvme-fabrics: allow internal passthrough command on deleting controllers
+References: bsc#1098527
+Git-commit: cc456b65b739e17e8b1e6b61cc2f014f225ba466
+Patch-Mainline: v4.18-rc1
+
+Without this we can't cleanly shut down.
+
+Based on analysis and an earlier patch from Hannes Reinecke.
+
+Fixes: bb06ec31452f ("nvme: expand nvmf_check_if_ready checks")
+Reported-by: Hannes Reinecke <hare@suse.de>
+Tested-by: Hannes Reinecke <hare@suse.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/fabrics.c | 76 ++++++++++++++++++---------------------------
+ 1 file changed, 31 insertions(+), 45 deletions(-)
+
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index ad9067b3f237..e39a10337a93 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -543,69 +543,55 @@ int nvmf_check_if_ready(struct nvme_ctrl *ctrl,
+ return 0;
+
+ switch (ctrl->state) {
+- case NVME_CTRL_DELETING:
+- goto reject_io;
+-
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_RECONNECTING:
++ case NVME_CTRL_DELETING:
++ /*
++ * This is the case of starting a new or deleting an association
++ * but connectivity was lost before it was fully created or torn
++ * down. We need to error the commands used to initialize the
++ * controller so the reconnect can go into a retry attempt. The
++ * commands should all be marked REQ_FAILFAST_DRIVER, which will
++ * hit the reject path below. Anything else will be queued while
++ * the state settles.
++ */
+ if (!is_connected)
+- /*
+- * This is the case of starting a new
+- * association but connectivity was lost
+- * before it was fully created. We need to
+- * error the commands used to initialize the
+- * controller so the reconnect can go into a
+- * retry attempt. The commands should all be
+- * marked REQ_FAILFAST_DRIVER, which will hit
+- * the reject path below. Anything else will
+- * be queued while the state settles.
+- */
+- goto reject_or_queue_io;
+-
+- if ((queue_live &&
+- !(nvme_req(rq)->flags & NVME_REQ_USERCMD)) ||
+- (!queue_live && rq->cmd_type == REQ_TYPE_DRV_PRIV &&
+- cmd->common.opcode == nvme_fabrics_command &&
+- cmd->fabrics.fctype == nvme_fabrics_type_connect))
+- /*
+- * let anything to a live queue through.
+- * Typically this will be commands to the admin
+- * queue which are either being used to initialize
+- * the controller or are commands being issued
+- * via the cli/ioctl path.
+- *
+- * if the q isn't live, allow only the connect
+- * command through.
+- */
++ break;
++
++ /*
++ * If queue is live, allow only commands that are internally
++ * generated pass through. These are commands on the admin
++ * queue to initialize the controller. This will reject any
++ * ioctl admin cmds received while initializing.
++ */
++ if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
+ return 0;
+
+ /*
+- * fall-thru to the reject_or_queue_io clause
++ * If the queue is not live, allow only a connect command. This
++ * will reject any ioctl admin cmd as well as initialization
++ * commands if the controller reverted the queue to non-live.
+ */
++ if (!queue_live && rq->cmd_type == REQ_TYPE_DRV_PRIV &&
++ cmd->common.opcode == nvme_fabrics_command &&
++ cmd->fabrics.fctype == nvme_fabrics_type_connect)
++ return 0;
+ break;
+
+- /* these cases fall-thru
+- * case NVME_CTRL_LIVE:
+- * case NVME_CTRL_RESETTING:
+- */
+ default:
+ break;
+ }
+
+-reject_or_queue_io:
+ /*
+- * Any other new io is something we're not in a state to send
+- * to the device. Default action is to busy it and retry it
+- * after the controller state is recovered. However, anything
+- * marked for failfast or nvme multipath is immediately failed.
+- * Note: commands used to initialize the controller will be
+- * marked for failfast.
++ * Any other new io is something we're not in a state to send to the
++ * device. Default action is to busy it and retry it after the
++ * controller state is recovered. However, anything marked for failfast
++ * or nvme multipath is immediately failed. Note: commands used to
++ * initialize the controller will be marked for failfast.
+ * Note: nvme cli/ioctl commands are marked for failfast.
+ */
+ if (!blk_noretry_request(rq))
+ return BLK_MQ_RQ_QUEUE_BUSY; /* try again later */
+-
+-reject_io:
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
+--
+2.12.3
+
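Because the hunk above interleaves removals and additions heavily, here is the routine as it reads with the patch applied, pieced together from the hunk's context and added lines (the opening LIVE check is pre-existing code implied by the hunk context, comments abbreviated):

	int nvmf_check_if_ready(struct nvme_ctrl *ctrl,
			struct request *rq, bool queue_live, bool is_connected)
	{
		struct nvme_command *cmd = nvme_req(rq)->cmd;

		if (ctrl->state == NVME_CTRL_LIVE && is_connected)
			return 0;

		switch (ctrl->state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RECONNECTING:
		case NVME_CTRL_DELETING:
			/* Connectivity lost mid-setup/teardown: the failfast
			 * init commands hit the reject path below, anything
			 * else queues until the state settles. */
			if (!is_connected)
				break;

			/* Live queue: only internally generated commands. */
			if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
				return 0;

			/* Non-live queue: only the connect command. */
			if (!queue_live && rq->cmd_type == REQ_TYPE_DRV_PRIV &&
			    cmd->common.opcode == nvme_fabrics_command &&
			    cmd->fabrics.fctype == nvme_fabrics_type_connect)
				return 0;
			break;

		default:
			break;
		}

		/* Busy-retry by default; failfast/multipath ios fail now. */
		if (!blk_noretry_request(rq))
			return BLK_MQ_RQ_QUEUE_BUSY;	/* try again later */
		nvme_req(rq)->status = NVME_SC_ABORT_REQ;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}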
diff --git a/patches.drivers/nvme-fabrics-centralize-discovery-controller-default.patch b/patches.drivers/nvme-fabrics-centralize-discovery-controller-default.patch
new file mode 100644
index 0000000000..8f5a1dfac7
--- /dev/null
+++ b/patches.drivers/nvme-fabrics-centralize-discovery-controller-default.patch
@@ -0,0 +1,48 @@
+From: Hannes Reinecke <hare@suse.de>
+Date: Thu, 24 May 2018 16:18:15 +0200
+Subject: [PATCH] nvme-fabrics: centralize discovery controller defaults
+References: bsc#1098527
+Git-commit: 461fbc8f0ed1ac18ce6b7bf7ab0b7444f7c7ff57
+Patch-mainline: v4.18-rc1
+
+When connecting to the discovery controller we have certain defaults
+to observe, so centralize them to avoid inconsistencies due to argument
+ordering.
+
+Signed-off-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/fabrics.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 865b47108d1f..27ad2b4d1d32 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -710,10 +710,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ opts->discovery_nqn =
+ !(strcmp(opts->subsysnqn,
+ NVME_DISC_SUBSYS_NAME));
+- if (opts->discovery_nqn) {
+- opts->kato = 0;
+- opts->nr_io_queues = 0;
+- }
+ break;
+ case NVMF_OPT_TRADDR:
+ p = match_strdup(args);
+@@ -861,6 +857,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ }
+ }
+
++ if (opts->discovery_nqn) {
++ opts->kato = 0;
++ opts->nr_io_queues = 0;
++ }
+ if (ctrl_loss_tmo < 0)
+ opts->max_reconnects = -1;
+ else
+--
+2.12.3
+
diff --git a/patches.drivers/nvme-fabrics-fix-and-refine-state-checks-in-__nvmf_c.patch b/patches.drivers/nvme-fabrics-fix-and-refine-state-checks-in-__nvmf_c.patch
new file mode 100644
index 0000000000..036d6be060
--- /dev/null
+++ b/patches.drivers/nvme-fabrics-fix-and-refine-state-checks-in-__nvmf_c.patch
@@ -0,0 +1,88 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Mon, 11 Jun 2018 17:41:11 +0200
+Subject: [PATCH] nvme-fabrics: fix and refine state checks in __nvmf_check_ready
+References: bsc#1098527
+Git-commit: 35897b920c8ab5e23331ad429e0aa235528c63ba
+Patch-Mainline: v4.18-rc1
+
+ - make sure we only allow internally generated commands in any non-live
+ state
+ - only allow connect commands on non-live queues when actually in the
+ new or connecting states
+ - treat all other non-live, non-dead states the same as a default
+   catch-all
+
+This fixes a regression where we could not shutdown a controller
+orderly as we didn't allow the internal generated Property Set
+command, and also ensures we don't accidentally let a Connect command
+through in the wrong state.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/fabrics.c | 40 +++++++++++++++++++---------------------
+ 1 file changed, 19 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index cdd7a48650eb..3422687a0c31 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -554,35 +554,33 @@ EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
+ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live)
+ {
+- struct nvme_command *cmd = nvme_req(rq)->cmd;
++ struct nvme_request *req = nvme_req(rq);
+
++ /*
++ * If we are in some state of setup or teardown only allow
++ * internally generated commands.
++ */
++ if (rq->cmd_type != REQ_TYPE_DRV_PRIV || req->flags & NVME_REQ_USERCMD)
++ return false;
++
++ /*
++ * Only allow commands on a live queue, except for the connect command,
++ * which is require to set the queue live in the appropinquate states.
++ */
+ switch (ctrl->state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_RECONNECTING:
+- case NVME_CTRL_DELETING:
+- /*
+- * If queue is live, allow only commands that are internally
+- * generated pass through. These are commands on the admin
+- * queue to initialize the controller. This will reject any
+- * ioctl admin cmds received while initializing.
+- */
+- if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
++ if (req->cmd->common.opcode == nvme_fabrics_command &&
++ req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
+ return true;
+-
+- /*
+- * If the queue is not live, allow only a connect command. This
+- * will reject any ioctl admin cmd as well as initialization
+- * commands if the controller reverted the queue to non-live.
+- */
+- if (!queue_live && rq->cmd_type == REQ_TYPE_DRV_PRIV &&
+- cmd->common.opcode == nvme_fabrics_command &&
+- cmd->fabrics.fctype == nvme_fabrics_type_connect)
+- return true;
+- return false;
+-
++ break;
+ default:
++ break;
++ case NVME_CTRL_DEAD:
+ return false;
+ }
++
++ return queue_live;
+ }
+ EXPORT_SYMBOL_GPL(__nvmf_check_ready);
+
+--
+2.12.3
+
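Again pieced together from the hunk above, the post-patch routine in full, which is easier to follow than the interleaved diff. All identifiers come from the hunk; note the DEAD case deliberately sits after default so that it alone returns false unconditionally while every other non-setup state falls through to the queue_live check:

	bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
			bool queue_live)
	{
		struct nvme_request *req = nvme_req(rq);

		/* In any state of setup or teardown, only internally
		 * generated commands may pass. */
		if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
		    req->flags & NVME_REQ_USERCMD)
			return false;

		switch (ctrl->state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RECONNECTING:
			/* Connect is the one command allowed on a queue that
			 * is not yet live. */
			if (req->cmd->common.opcode == nvme_fabrics_command &&
			    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
				return true;
			break;
		default:
			break;
		case NVME_CTRL_DEAD:
			return false;
		}

		/* Everything else is gated purely on queue liveness. */
		return queue_live;
	}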
diff --git a/patches.drivers/nvme-fabrics-refactor-queue-ready-check.patch b/patches.drivers/nvme-fabrics-refactor-queue-ready-check.patch
new file mode 100644
index 0000000000..3c1ea83896
--- /dev/null
+++ b/patches.drivers/nvme-fabrics-refactor-queue-ready-check.patch
@@ -0,0 +1,212 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Mon, 11 Jun 2018 17:34:06 +0200
+Subject: [PATCH] nvme-fabrics: refactor queue ready check
+References: bsc#1098527
+Git-commit: 3bc32bb1186ccaf3177cbf29caa6cc14dc510b7b
+Patch-Mainline: v4.18-rc1
+
+Move the is_connected check to the fibre channel transport, as it has no
+meaning for other transports. To facilitate this split out a new
+nvmf_fail_nonready_command helper that is called by the transport when
+it is asked to handle a command on a queue that is not ready.
+
+Also avoid a function call for the queue live fast path by inlining
+the check.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/fabrics.c | 59 ++++++++++++++++++---------------------------
+ drivers/nvme/host/fabrics.h | 13 ++++++++--
+ drivers/nvme/host/fc.c | 9 +++----
+ drivers/nvme/host/rdma.c | 7 +++---
+ drivers/nvme/target/loop.c | 7 +++---
+ 5 files changed, 45 insertions(+), 50 deletions(-)
+
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index e39a10337a93..cdd7a48650eb 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -534,38 +534,40 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
+ return NULL;
+ }
+
+-int nvmf_check_if_ready(struct nvme_ctrl *ctrl,
+- struct request *rq, bool queue_live, bool is_connected)
++/*
++ * For something we're not in a state to send to the device the default action
++ * is to busy it and retry it after the controller state is recovered. However,
++ * anything marked for failfast or nvme multipath is immediately failed.
++ *
++ * Note: commands used to initialize the controller will be marked for failfast.
++ * Note: nvme cli/ioctl commands are marked for failfast.
++ */
++int nvmf_fail_nonready_command(struct request *rq)
+ {
+- struct nvme_command *cmd = nvme_req(rq)->cmd;
++ if (!blk_noretry_request(rq))
++ return BLK_MQ_RQ_QUEUE_BUSY; /* try again later */
++ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
++ return BLK_MQ_RQ_QUEUE_ERROR;
++}
++EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
+
+- if (ctrl->state == NVME_CTRL_LIVE && is_connected)
+- return 0;
++bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
++ bool queue_live)
++{
++ struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+ switch (ctrl->state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_DELETING:
+ /*
+- * This is the case of starting a new or deleting an association
+- * but connectivity was lost before it was fully created or torn
+- * down. We need to error the commands used to initialize the
+- * controller so the reconnect can go into a retry attempt. The
+- * commands should all be marked REQ_FAILFAST_DRIVER, which will
+- * hit the reject path below. Anything else will be queued while
+- * the state settles.
+- */
+- if (!is_connected)
+- break;
+-
+- /*
+ * If queue is live, allow only commands that are internally
+ * generated pass through. These are commands on the admin
+ * queue to initialize the controller. This will reject any
+ * ioctl admin cmds received while initializing.
+ */
+ if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
+- return 0;
++ return true;
+
+ /*
+ * If the queue is not live, allow only a connect command. This
+@@ -575,27 +577,14 @@ int nvmf_check_if_ready(struct nvme_ctrl *ctrl,
+ if (!queue_live && rq->cmd_type == REQ_TYPE_DRV_PRIV &&
+ cmd->common.opcode == nvme_fabrics_command &&
+ cmd->fabrics.fctype == nvme_fabrics_type_connect)
+- return 0;
+- break;
++ return true;
++ return false;
+
+ default:
+- break;
++ return false;
+ }
+-
+- /*
+- * Any other new io is something we're not in a state to send to the
+- * device. Default action is to busy it and retry it after the
+- * controller state is recovered. However, anything marked for failfast
+- * or nvme multipath is immediately failed. Note: commands used to
+- * initialize the controller will be marked for failfast.
+- * Note: nvme cli/ioctl commands are marked for failfast.
+- */
+- if (!blk_noretry_request(rq))
+- return BLK_MQ_RQ_QUEUE_BUSY; /* try again later */
+- nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+- return BLK_MQ_RQ_QUEUE_ERROR;
+ }
+-EXPORT_SYMBOL_GPL(nvmf_check_if_ready);
++EXPORT_SYMBOL_GPL(__nvmf_check_ready);
+
+ static const match_table_t opt_tokens = {
+ { NVMF_OPT_TRANSPORT, "transport=%s" },
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
+index c2a164009da3..812ee2609fc3 100644
+--- a/drivers/nvme/host/fabrics.h
++++ b/drivers/nvme/host/fabrics.h
+@@ -160,7 +160,16 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
+ const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl);
+ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
+ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+-int nvmf_check_if_ready(struct nvme_ctrl *ctrl,
+- struct request *rq, bool queue_live, bool is_connected);
++int nvmf_fail_nonready_command(struct request *rq);
++bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
++ bool queue_live);
++
++static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
++ bool queue_live)
++{
++ if (likely(ctrl->state == NVME_CTRL_LIVE))
++ return true;
++ return __nvmf_check_ready(ctrl, rq, queue_live);
++}
+
+ #endif /* _NVME_FABRICS_H */
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index bb5d29c925f7..b8c55eb589b8 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2310,14 +2310,13 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
+ enum nvmefc_fcp_datadir io_dir;
++ bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
+ u32 data_len;
+ int ret;
+
+- ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
+- test_bit(NVME_FC_Q_LIVE, &queue->flags),
+- ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE);
+- if (unlikely(ret))
+- return ret;
++ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
++ !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
++ return nvmf_fail_nonready_command(rq);
+
+ ret = nvme_setup_cmd(ns, rq, sqe);
+ if (ret)
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index e13509970e24..d04f94320cbd 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1456,14 +1456,13 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+ struct nvme_command *c = sqe->data;
+ bool flush = false;
+ struct ib_device *dev;
++ bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
+ int ret;
+
+ WARN_ON_ONCE(rq->tag < 0);
+
+- ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
+- test_bit(NVME_RDMA_Q_LIVE, &queue->flags), true);
+- if (unlikely(ret))
+- goto err;
++ if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
++ return nvmf_fail_nonready_command(rq);
+
+ dev = queue->device->dev;
+ ib_dma_sync_single_for_cpu(dev, sqe->dma,
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 339079087da8..d24c440cceeb 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -171,12 +171,11 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ struct nvme_loop_queue *queue = hctx->driver_data;
+ struct request *req = bd->rq;
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
++ bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
+ int ret;
+
+- ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
+- test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
+- if (unlikely(ret))
+- return ret;
++ if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
++ return nvmf_fail_nonready_command(req);
+
+ ret = nvme_setup_cmd(ns, req, &iod->cmd);
+ if (ret != BLK_MQ_RQ_QUEUE_OK)
+--
+2.12.3
+
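With the split in place, every transport's queue_rq entry follows the same two-step pattern; condensed here from the fc hunk above (rdma and loop are identical minus the transport-specific connectivity test):

	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);

	/* Transport connectivity first, then the generic readiness check;
	 * nvmf_check_ready() is now an inline that short-circuits for LIVE
	 * controllers, so the common case costs no function call. */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(rq);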
diff --git a/patches.drivers/nvme-fc-change-controllers-first-connect-to-use-reco.patch b/patches.drivers/nvme-fc-change-controllers-first-connect-to-use-reco.patch
new file mode 100644
index 0000000000..be87cd2892
--- /dev/null
+++ b/patches.drivers/nvme-fc-change-controllers-first-connect-to-use-reco.patch
@@ -0,0 +1,265 @@
+From: James Smart <jsmart2021@gmail.com>
+Date: Wed, 13 Jun 2018 14:07:37 -0700
+Subject: [PATCH] nvme-fc: change controllers first connect to use reconnect
+ path
+References: bsc#1098527
+Git-commit: 4c984154efa13175bbb1e2aeb1de9fb2960ca28c
+Patch-mainline: v4.18-rc1
+
+Current code follows the framework that has been in the transports
+from the beginning where initial link-side controller connect occurs
+as part of "creating the controller". Thus that first connect fully
+talks to the controller and obtains values that can then be used in
+for blk-mq setup, etc. It also means that everything about the
+controller is fully know before the "create controller" call returns.
+
+This has several weaknesses:
+- The initial create_ctrl call made by the cli will block for a long
+ time as wire transactions are performed synchronously. This delay
+ becomes longer if errors occur or connectivity is lost and retries
+ need to be performed.
+- Code wise, it means there is a separate connect path for initial
+ controller connect vs the (same) steps used in the reconnect path.
+- And as there's separate paths, it means there's separate error
+ handling and retry logic. It also plays havoc with the NEW state
+ (should transition out of it after successful initial connect) vs
+ the RESETTING and CONNECTING (reconnect) states that want to be
+ transitioned to on error.
+- As there's separate paths, to recover from errors and disruptions,
+ it requires separate recovery/retry paths as well and can severely
+ convolute the controller state.
+
+This patch reworks the fc transport to use the same connect paths
+for the initial connection as it uses for reconnect. This makes a
+single path for error recovery and handling.
+
+This patch:
+- Removes the driving of the initial connect and replaces it with
+ a state transition to CONNECTING and initiating the reconnect
+ thread. A dummy state transition of RESETTING had to be traversed
+ as a direct transition of NEW->CONNECTING is not allowed. Given
+ that the controller is "new", the RESETTING transition is a simple
+ no-op. Once in the reconnecting thread, the normal behaviors of
+ ctrl_loss_tmo (max_retries * connect_delay) and dev_loss_tmo will
+ apply before the controller is torn down.
+- Only if the state transitions couldn't be traversed and the
+ reconnect thread not scheduled, will the controller be torn down
+ while in create_ctrl.
+- The prior code used the controller state of NEW to indicate
+ whether request queues had been initialized or not. For the admin
+ queue, the request queue is always created, so there's no need to
+ check a state. For IO queues, change to tracking whether a successful
+ io request queue create has occurred (e.g. 1st successful connect).
+- The initial controller id is initialized to the dynamic controller
+ id used in the initial connect message. It will be overwritten by
+ the real controller id once the controller is connected on the wire.
+
+Signed-off-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/fc.c | 108 ++++++++++++++++++++++---------------------------
+ 1 file changed, 49 insertions(+), 59 deletions(-)
+
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index ee6245c33c45..b8383ebfb296 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -154,6 +154,7 @@ struct nvme_fc_ctrl {
+ u32 queue_count;
+ u32 cnum;
+
++ bool ioq_live;
+ bool assoc_active;
+ u64 association_id;
+
+@@ -2514,6 +2515,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+ if (ret)
+ goto out_delete_hw_queues;
+
++ ctrl->ioq_live = true;
++
+ return 0;
+
+ out_delete_hw_queues:
+@@ -2667,8 +2670,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
+ if (ret)
+ goto out_delete_hw_queue;
+
+- if (ctrl->ctrl.state != NVME_CTRL_NEW)
+- blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
++ blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+
+ ret = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (ret)
+@@ -2743,7 +2745,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
+ */
+
+ if (ctrl->queue_count > 1) {
+- if (ctrl->ctrl.state == NVME_CTRL_NEW)
++ if (!ctrl->ioq_live)
+ ret = nvme_fc_create_io_queues(ctrl);
+ else
+ ret = nvme_fc_reinit_io_queues(ctrl);
+@@ -2836,8 +2838,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
+ * use blk_mq_tagset_busy_itr() and the transport routine to
+ * terminate the exchanges.
+ */
+- if (ctrl->ctrl.state != NVME_CTRL_NEW)
+- blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
++ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+
+@@ -3072,7 +3073,7 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
+ nvme_fc_reconnect_or_delete(ctrl, ret);
+ else
+ dev_info(ctrl->ctrl.device,
+- "NVME-FC{%d}: controller reconnect complete\n",
++ "NVME-FC{%d}: controller connect complete\n",
+ ctrl->cnum);
+ }
+
+@@ -3121,7 +3122,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ {
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+- int ret, idx, retry;
++ int ret, idx;
+
+ if (!(rport->remoteport.port_role &
+ (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
+@@ -3149,13 +3150,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ }
+
+ ctrl->ctrl.opts = opts;
++ ctrl->ctrl.opts->nr_reconnects = 0;
+ INIT_LIST_HEAD(&ctrl->ctrl_list);
+ ctrl->lport = lport;
+ ctrl->rport = rport;
+ ctrl->dev = lport->dev;
+ ctrl->cnum = idx;
+- init_waitqueue_head(&ctrl->ioabort_wait);
++ ctrl->ioq_live = false;
+ ctrl->assoc_active = false;
++ init_waitqueue_head(&ctrl->ioabort_wait);
+
+ get_device(ctrl->dev);
+ kref_init(&ctrl->ref);
+@@ -3173,6 +3176,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.kato = opts->kato;
++ ctrl->ctrl.cntlid = 0xffff;
+
+ ret = -ENOMEM;
+ ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
+@@ -3220,68 +3224,54 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+- /*
+- * It's possible that transactions used to create the association
+- * may fail. Examples: CreateAssociation LS or CreateIOConnection
+- * LS gets dropped/corrupted/fails; or a frame gets dropped or a
+- * command times out for one of the actions to init the controller
+- * (Connect, Get/Set_Property, Set_Features, etc). Many of these
+- * transport errors (frame drop, LS failure) inherently must kill
+- * the association. The transport is coded so that any command used
+- * to create the association (prior to a LIVE state transition
+- * while NEW or RECONNECTING) will fail if it completes in error or
+- * times out.
+- *
+- * As such: as the connect request was mostly likely due to a
+- * udev event that discovered the remote port, meaning there is
+- * not an admin or script there to restart if the connect
+- * request fails, retry the initial connection creation up to
+- * three times before giving up and declaring failure.
+- */
+- for (retry = 0; retry < 3; retry++) {
+- ret = nvme_fc_create_association(ctrl);
+- if (!ret)
+- break;
++ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
++ !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
++ dev_err(ctrl->ctrl.device,
++ "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
++ goto fail_ctrl;
+ }
++
++ kref_get(&ctrl->ctrl.kref);
+
+- if (ret) {
+- nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+- cancel_work_sync(&ctrl->reset_work);
+- cancel_delayed_work_sync(&ctrl->connect_work);
+-
+- /* couldn't schedule retry - fail out */
++ if (!queue_delayed_work(nvme_fc_wq, &ctrl->connect_work, 0)) {
++ nvme_put_ctrl(&ctrl->ctrl);
+ dev_err(ctrl->ctrl.device,
+- "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
++ "NVME-FC{%d}: failed to schedule initial connect\n",
++ ctrl->cnum);
++ goto fail_ctrl;
++ }
+
+- ctrl->ctrl.opts = NULL;
++ flush_delayed_work(&ctrl->connect_work);
++
++ dev_info(ctrl->ctrl.device,
++ "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
++ ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
+
+- /* initiate nvme ctrl ref counting teardown */
+- nvme_uninit_ctrl(&ctrl->ctrl);
++ return &ctrl->ctrl;
+
+- /* Remove core ctrl ref. */
+- nvme_put_ctrl(&ctrl->ctrl);
++fail_ctrl:
++ nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
++ cancel_work_sync(&ctrl->reset_work);
++ cancel_delayed_work_sync(&ctrl->connect_work);
+
+- /* as we're past the point where we transition to the ref
+- * counting teardown path, if we return a bad pointer here,
+- * the calling routine, thinking it's prior to the
+- * transition, will do an rport put. Since the teardown
+- * path also does a rport put, we do an extra get here to
+- * so proper order/teardown happens.
+- */
+- nvme_fc_rport_get(rport);
++ ctrl->ctrl.opts = NULL;
+
+- if (ret > 0)
+- ret = -EIO;
+- return ERR_PTR(ret);
+- }
++ /* initiate nvme ctrl ref counting teardown */
++ nvme_uninit_ctrl(&ctrl->ctrl);
+
+- kref_get(&ctrl->ctrl.kref);
++ /* Remove core ctrl ref. */
++ nvme_put_ctrl(&ctrl->ctrl);
+
+- dev_info(ctrl->ctrl.device,
+- "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+- ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
++ /* as we're past the point where we transition to the ref
++ * counting teardown path, if we return a bad pointer here,
++ * the calling routine, thinking it's prior to the
++ * transition, will do an rport put. Since the teardown
++ * path also does a rport put, we do an extra get here to
++ * so proper order/teardown happens.
++ */
++ nvme_fc_rport_get(rport);
+
+- return &ctrl->ctrl;
++ return ERR_PTR(-EIO);
+
+ out_cleanup_admin_q:
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+--
+2.12.3
+
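Condensed from the hunk above, the initial connect now reduces to a pair of state transitions plus the shared reconnect worker. This is a simplified sketch; the dev_err/dev_info reporting and the nvme_put_ctrl on the scheduling-failure path are omitted:

	/* NEW -> RESETTING -> RECONNECTING; the RESETTING hop is a no-op,
	 * needed only because NEW -> RECONNECTING is not a legal move. */
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
		goto fail_ctrl;

	kref_get(&ctrl->ctrl.kref);
	if (!queue_delayed_work(nvme_fc_wq, &ctrl->connect_work, 0))
		goto fail_ctrl;

	/* create_ctrl still waits for the first attempt to complete, but
	 * any retries now ride the normal ctrl_loss_tmo reconnect logic. */
	flush_delayed_work(&ctrl->connect_work);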
diff --git a/patches.drivers/nvme-fc-fix-nulling-of-queue-data-on-reconnect.patch b/patches.drivers/nvme-fc-fix-nulling-of-queue-data-on-reconnect.patch
new file mode 100644
index 0000000000..1ff724ee87
--- /dev/null
+++ b/patches.drivers/nvme-fc-fix-nulling-of-queue-data-on-reconnect.patch
@@ -0,0 +1,83 @@
+From: James Smart <jsmart2021@gmail.com>
+Date: Wed, 13 Jun 2018 14:07:38 -0700
+Subject: [PATCH] nvme-fc: fix nulling of queue data on reconnect
+References: bsc#1098527
+Git-commit: 3e493c00cedb457c0731399a835f7ba1c6df172b
+Patch-mainline: v4.18-rc1
+
+The reconnect path is calling the init routines to clear a queue
+structure. But the queue structure has state that perhaps needs
+to persist as long as the controller is live.
+
+Remove the nvme_fc_init_queue() calls on reconnect.
+The nvme_fc_free_queue() calls will clear state bits and reset
+any relevant queue state for a new connection.
+
+Signed-off-by: James Smart <james.smart@broadcom.com>
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/fc.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 17e18f879d60..bb5d29c925f7 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -1913,6 +1913,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
+ */
+
+ queue->connection_id = 0;
++ atomic_set(&queue->csn, 1);
+ }
+
+ static void
+@@ -2519,7 +2520,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+ }
+
+ static int
+-nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
++nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
+ {
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ unsigned int nr_io_queues;
+@@ -2539,8 +2540,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
+ if (ctrl->queue_count == 1)
+ return 0;
+
+- nvme_fc_init_io_queues(ctrl);
+-
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
+ if (ret)
+ goto out_free_io_queues;
+@@ -2638,8 +2637,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
+ * Create the admin queue
+ */
+
+- nvme_fc_init_queue(ctrl, 0);
+-
+ ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
+ NVMF_AQ_DEPTH);
+ if (ret)
+@@ -2728,7 +2725,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
+ if (!ctrl->ioq_live)
+ ret = nvme_fc_create_io_queues(ctrl);
+ else
+- ret = nvme_fc_reinit_io_queues(ctrl);
++ ret = nvme_fc_recreate_io_queues(ctrl);
+ if (ret)
+ goto out_term_aen_ops;
+ }
+@@ -3163,6 +3160,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ if (!ctrl->queues)
+ goto out_free_ida;
+
++ nvme_fc_init_queue(ctrl, 0);
++
+ memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+ ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
+ ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
+--
+2.12.3
+
diff --git a/patches.drivers/nvme-fc-remove-reinit_request-routine.patch b/patches.drivers/nvme-fc-remove-reinit_request-routine.patch
new file mode 100644
index 0000000000..ce816616e3
--- /dev/null
+++ b/patches.drivers/nvme-fc-remove-reinit_request-routine.patch
@@ -0,0 +1,77 @@
+From: James Smart <jsmart2021@gmail.com>
+Date: Wed, 13 Jun 2018 14:07:36 -0700
+Subject: [PATCH] nvme-fc: remove reinit_request routine
+References: bsc#1098527
+Git-commit: 587331f71e2748371526597cafc72e5732c67e88
+Patch-mainline: v4.18-rc1
+
+The reinit_request routine is not necessary. Remove support for the
+op callback.
+
+As all that nvme_reinit_tagset() does is iterate and call the
+reinit routine, it too has no purpose. Remove the call.
+
+Signed-off-by: James Smart <james.smart@broadcom.com>
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/fc.c | 21 ---------------------
+ 1 file changed, 21 deletions(-)
+
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index b8383ebfb296..17e18f879d60 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -1493,21 +1493,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
+
+ static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
+
+-static int
+-nvme_fc_reinit_request(void *data, struct request *rq)
+-{
+- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+- struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+-
+- memset(cmdiu, 0, sizeof(*cmdiu));
+- cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+- cmdiu->fc_id = NVME_CMD_FC_ID;
+- cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+- memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
+-
+- return 0;
+-}
+-
+ static void
+ __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
+ struct nvme_fc_fcp_op *op)
+@@ -2453,7 +2438,6 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
+ .complete = nvme_fc_complete_rq,
+ .init_request = nvme_fc_init_request,
+ .exit_request = nvme_fc_exit_request,
+- .reinit_request = nvme_fc_reinit_request,
+ .init_hctx = nvme_fc_init_hctx,
+ .poll = nvme_fc_poll,
+ .timeout = nvme_fc_timeout,
+@@ -2557,10 +2541,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
+
+ nvme_fc_init_io_queues(ctrl);
+
+- ret = blk_mq_reinit_tagset(&ctrl->tag_set);
+- if (ret)
+- goto out_free_io_queues;
+-
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
+ if (ret)
+ goto out_free_io_queues;
+@@ -3083,7 +3063,6 @@ static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
+ .complete = nvme_fc_complete_rq,
+ .init_request = nvme_fc_init_admin_request,
+ .exit_request = nvme_fc_exit_request,
+- .reinit_request = nvme_fc_reinit_request,
+ .init_hctx = nvme_fc_init_admin_hctx,
+ .timeout = nvme_fc_timeout,
+ };
+--
+2.12.3
+
diff --git a/patches.drivers/nvme-fc-remove-setting-DNR-on-exception-conditions.patch b/patches.drivers/nvme-fc-remove-setting-DNR-on-exception-conditions.patch
new file mode 100644
index 0000000000..2feba0591c
--- /dev/null
+++ b/patches.drivers/nvme-fc-remove-setting-DNR-on-exception-conditions.patch
@@ -0,0 +1,44 @@
+From: James Smart <jsmart2021@gmail.com>
+Date: Fri, 11 May 2018 17:50:24 -0700
+Subject: [PATCH] nvme-fc: remove setting DNR on exception conditions
+References: bsc#1098527
+Git-commit: 90fcaf5d54c57037e9f879f17b58497db7156c3e
+Patch-mainline: v4.18-rc1
+
+Current code will set DNR if the controller is deleting or there is
+an error during controller init. None of this is necessary.
+
+Remove the code that sets DNR.
+
+Signed-off-by: James Smart <james.smart@broadcom.com>
+Reviewed-by: Hannes Reinecke <hare@suse.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/fc.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 0ff346d4c0b4..26a10fff6029 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -1708,16 +1708,6 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
+ goto check_error;
+ }
+
+- /*
+- * Force failures of commands if we're killing the controller
+- * or have an error on a command used to create an new association
+- */
+- if (status &&
+- (blk_queue_dying(rq->q) ||
+- ctrl->ctrl.state == NVME_CTRL_NEW ||
+- ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+- status |= cpu_to_le16(NVME_SC_DNR << 1);
+-
+ __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+ nvme_end_request(rq, status, result);
+
+--
+2.12.3
+
diff --git a/patches.drivers/nvme-move-init-of-keep_alive-work-item-to-controller.patch b/patches.drivers/nvme-move-init-of-keep_alive-work-item-to-controller.patch
new file mode 100644
index 0000000000..d7f8da5e02
--- /dev/null
+++ b/patches.drivers/nvme-move-init-of-keep_alive-work-item-to-controller.patch
@@ -0,0 +1,50 @@
+From bdf65a0fecce0efcf0bcc0d53572c4e6f64c70f5 Mon Sep 17 00:00:00 2001
+From: James Smart <jsmart2021@gmail.com>
+Date: Tue, 12 Jun 2018 16:28:24 -0700
+Subject: [PATCH] nvme: move init of keep_alive work item to controller initialization
+References: bsc#1098527
+Git-commit: dec684fd36ce87fc16918fb909e5d0f1ae42b2e6
+Patch-Mainline: queued in subsystem maintainer repository
+Git-repo: git://git.infradead.org/nvme.git
+
+Currently, the code initializes the keep alive work item whenever
+nvme_start_keep_alive() is called. However, this routine is called
+several times while reconnecting, etc. Although it's hoped that keep
+alive is always disabled and not scheduled when start is called,
+re-initializing it while scheduled or completing can have very bad
+side effects. There's no need for re-initialization.
+
+Move the keep_alive work item and cmd struct initialization to
+controller init.
+
+Signed-off-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/host/core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f3e6760845d9..dad451168e3b 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -583,7 +583,6 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+- INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ }
+ EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
+@@ -2205,6 +2204,8 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ INIT_WORK(&ctrl->scan_work, nvme_scan_work);
+ INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
+
++ INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
++
+ ret = nvme_set_instance(ctrl);
+ if (ret)
+ goto out;
+--
+2.12.3
+
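The rule this patch restores, sketched for clarity. The init and schedule calls are taken from the hunks above; the cancel on the stop side is not shown in this hunk and is an assumption about the surrounding code:

	/* Once, at controller creation, where this patch moves it: */
	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);

	/* Afterwards, only schedule and cancel. Re-running
	 * INIT_DELAYED_WORK on an item that may still be queued or
	 * executing corrupts its pending state. */
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);	/* start */
	cancel_delayed_work_sync(&ctrl->ka_work);		/* stop, assumed */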
diff --git a/patches.drivers/nvmet-fc-increase-LS-buffer-count-per-fc-port.patch b/patches.drivers/nvmet-fc-increase-LS-buffer-count-per-fc-port.patch
new file mode 100644
index 0000000000..43f5f491e3
--- /dev/null
+++ b/patches.drivers/nvmet-fc-increase-LS-buffer-count-per-fc-port.patch
@@ -0,0 +1,36 @@
+From: James Smart <jsmart2021@gmail.com>
+Date: Mon, 21 May 2018 16:27:42 -0700
+Subject: [PATCH] nvmet-fc: increase LS buffer count per fc port
+References: bsc#1098527
+Git-commit: 17d78252ee568ecf94df6829c81578c0602991eb
+Patch-mainline: v4.18-rc1
+
+Today's limit on concurrent LS's is very small - 4 buffers. With large
+subsystem counts or large numbers of initiators connecting, the limit
+may be exceeded.
+
+Raise the LS buffer count to 256.
+
+Signed-off-by: James Smart <james.smart@broadcom.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/target/fc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index effdeaa9aa1b..71e09771da94 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -31,7 +31,7 @@
+ /* *************************** Data Structures/Defines ****************** */
+
+
+-#define NVMET_LS_CTX_COUNT 4
++#define NVMET_LS_CTX_COUNT 256
+
+ /* for this implementation, assume small single frame rqst/rsp */
+ #define NVME_FC_MAX_LS_BUFFER_SIZE 2048
+--
+2.12.3
+
diff --git a/patches.drivers/nvmet-switch-loopback-target-state-to-connecting-whe.patch b/patches.drivers/nvmet-switch-loopback-target-state-to-connecting-whe.patch
new file mode 100644
index 0000000000..89099d5b1a
--- /dev/null
+++ b/patches.drivers/nvmet-switch-loopback-target-state-to-connecting-whe.patch
@@ -0,0 +1,47 @@
+From: Johannes Thumshirn <jthumshirn@suse.de>
+Date: Thu, 3 May 2018 17:00:35 +0200
+Subject: [PATCH] nvmet: switch loopback target state to connecting when
+ resetting
+References: bsc#1098527
+Git-commit: 8bfc3b4c6f9de815de4ab73784b9419348266a65
+Patch-mainline: v4.17-rc4
+
+After commit bb06ec31452f ("nvme: expand nvmf_check_if_ready checks")
+resetting of the loopback nvme target failed as we forgot to switch
+it's state to NVME_CTRL_CONNECTING before we reconnect the admin
+queues. Therefore the checks in nvmf_check_if_ready() choose to go to
+the reject_io case and thus we couldn't sent out an identify
+controller command to reconnect.
+
+Change the controller state to NVME_CTRL_CONNECTING after tearing down
+the old connection and before re-establishing the connection.
+
+Fixes: bb06ec31452f ("nvme: expand nvmf_check_if_ready checks")
+Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+---
+ drivers/nvme/target/loop.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index d24c440cceeb..5cb26ab34c93 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -521,6 +521,12 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
+
+ nvme_loop_shutdown_ctrl(ctrl);
+
++ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
++ /* state change failure should never happen */
++ WARN_ON_ONCE(1);
++ return;
++ }
++
+ ret = nvme_loop_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_disable;
+--
+2.12.3
+
diff --git a/patches.drivers/scsi-qedi-Fix-truncation-of-CHAP-name-and-secret.patch b/patches.drivers/scsi-qedi-Fix-truncation-of-CHAP-name-and-secret.patch
new file mode 100644
index 0000000000..b88816a1be
--- /dev/null
+++ b/patches.drivers/scsi-qedi-Fix-truncation-of-CHAP-name-and-secret.patch
@@ -0,0 +1,80 @@
+From: Andrew Vasquez <andrew.vasquez@cavium.com>
+Date: Wed, 7 Feb 2018 08:12:35 -0800
+Subject: [PATCH] scsi: qedi: Fix truncation of CHAP name and secret
+References: bsc#1097931
+Git-commit: 1683ce57f568c7c92d53e9234624a53554a29cd5
+Patch-mainline: v4.16-rc3
+
+The data in NVRAM is not guaranteed to be NUL terminated. Since
+snprintf expects the byte stream to accommodate a NUL byte, the CHAP secret
+is truncated. Use sprintf instead of snprintf to fix the truncation of
+CHAP name and secret.
+
+Signed-off-by: Andrew Vasquez <andrew.vasquez@cavium.com>
+Signed-off-by: Nilesh Javali <nilesh.javali@cavium.com>
+Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
+Acked-by: Chris Leech <cleech@redhat.com>
+Acked-by: Lee Duncan <lduncan@suse.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+---
+ drivers/scsi/qedi/qedi_main.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index 58596d17f7d9..7c05be680b94 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -1830,8 +1830,8 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+- rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+- initiator->initiator_name.byte);
++ rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
++ initiator->initiator_name.byte);
+ break;
+ default:
+ rc = 0;
+@@ -1898,8 +1898,8 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_NAME:
+- rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+- block->target[idx].target_name.byte);
++ rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
++ block->target[idx].target_name.byte);
+ break;
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ if (ipv6_en)
+@@ -1920,20 +1920,20 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
+ block->target[idx].lun.value[0]);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
+- chap_name);
++ rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
++ chap_name);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
+- chap_secret);
++ rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
++ chap_secret);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
+- mchap_name);
++ rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
++ mchap_name);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
+- mchap_secret);
++ rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
++ mchap_secret);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+ rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+--
+2.13.7
+
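The distinction the patch exploits, sketched with a hypothetical local buffer (str and rc stand in for the driver's real destination): snprintf(dst, LEN, "%s\n", src) both requires src to be NUL terminated and writes at most LEN - 1 payload bytes, clipping a full-length field, while a "%.*s" precision bounds the read at LEN bytes and needs no terminator:

	char str[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN + 2];	/* field + '\n' + '\0' */
	int rc;

	/* Old: truncates a maximum-length name and relies on a '\0' that
	 * NVRAM does not guarantee. */
	rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", chap_name);

	/* New: reads at most NAME_MAX_LEN bytes, terminator or not. */
	rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, chap_name);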
diff --git a/patches.fixes/nvme-expand-nvmf_check_if_ready-checks.patch b/patches.fixes/nvme-expand-nvmf_check_if_ready-checks.patch
index 965be45218..036bbd35c2 100644
--- a/patches.fixes/nvme-expand-nvmf_check_if_ready-checks.patch
+++ b/patches.fixes/nvme-expand-nvmf_check_if_ready-checks.patch
@@ -1,8 +1,9 @@
From: James Smart <jsmart2021@gmail.com>
-Date: Tue, 27 Mar 2018 16:36:02 -0700
-Subject: [PATCH v4] nvme: expand nvmf_check_if_ready checks
-References: bsc#1085058
-Patch-Mainline: submitted linux-nvme 2018/03/29
+Date: Thu, 12 Apr 2018 09:16:15 -0600
+Subject: [PATCH] nvme: expand nvmf_check_if_ready checks
+References: bsc#1098527
+Git-commit: bb06ec31452fb2da1594f88035c2ecea4e0652f4
+Patch-mainline: v4.17-rc1
** this patch has been adapted to SLES12SP3
@@ -24,27 +25,85 @@ queue-or-reject check is made to look for failfast or multipath ios and
only fails the io if it is so marked. Otherwise, the io will be queued
and wait for the controller state to resolve.
+Admin commands issued via ioctl share a live admin queue with commands
+from the transport for controller init. The ioctls could be intermixed
+with the initialization commands. It's possible for the ioctl cmd to
+be issued prior to the controller being enabled. To block this, the
+ioctl admin commands need to be distinguished from admin commands used
+for controller init. Added a USERCMD nvme_req(req)->rq_flags bit to
+reflect this division and set it on ioctls requests. As the
+nvmf_check_if_ready() routine is called prior to nvme_setup_cmd(),
+ensure that commands allocated by the ioctl path (actually anything
+in core.c) preps the nvme_req(req) before starting the io. This will
+preserve the USERCMD flag during execution and/or retry.
+
Signed-off-by: James Smart <james.smart@broadcom.com>
-Acked-by: Hannes Reinecke <hare@suse.com>
----
-v2:
- needed to set nvme status when rejecting io
-v3:
- renamed qlive to queue_live and connectivity to is_connected
- converted from inline routine to fabrics exported routine.
-v4:
- missing a check on blk_rq_is_passthrough() as not all requests
- have nvme_req(rq)->cmd set.
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Signed-off-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
---
+ drivers/nvme/host/core.c | 17 +++++++---
drivers/nvme/host/fabrics.c | 77 +++++++++++++++++++++++++++++++++++++++++++++
drivers/nvme/host/fabrics.h | 32 ++-----------------
drivers/nvme/host/fc.c | 12 ++-----
+ drivers/nvme/host/nvme.h | 1 +
drivers/nvme/host/rdma.c | 14 ++-------
drivers/nvme/target/loop.c | 11 ++-----
- 5 files changed, 86 insertions(+), 60 deletions(-)
+ 7 files changed, 99 insertions(+), 65 deletions(-)
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index b69e81984cce..f3e6760845d9 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -246,6 +246,15 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
+ return NULL;
+ }
+
++static inline void nvme_clear_nvme_request(struct request *req)
++{
++ if (!(req->rq_flags & RQF_DONTPREP)) {
++ nvme_req(req)->retries = 0;
++ nvme_req(req)->flags = 0;
++ req->rq_flags |= RQF_DONTPREP;
++ }
++}
++
+ struct request *nvme_alloc_request(struct request_queue *q,
+ struct nvme_command *cmd, unsigned int flags, int qid)
+ {
+@@ -262,6 +271,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
+
+ req->cmd_type = REQ_TYPE_DRV_PRIV;
+ req->cmd_flags |= REQ_FAILFAST_DRIVER;
++ nvme_clear_nvme_request(req);
+ nvme_req(req)->cmd = cmd;
+
+ return req;
+@@ -350,11 +360,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+ {
+ int ret = BLK_MQ_RQ_QUEUE_OK;
+
+- if (!(req->rq_flags & RQF_DONTPREP)) {
+- nvme_req(req)->retries = 0;
+- nvme_req(req)->flags = 0;
+- req->rq_flags |= RQF_DONTPREP;
+- }
++ nvme_clear_nvme_request(req);
+
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
+@@ -433,6 +439,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+ return PTR_ERR(req);
+
+ req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
++ nvme_req(req)->flags |= NVME_REQ_USERCMD;
+
+ if (ubuffer && bufflen) {
+ ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
-index 036db5bb182d..7ed4f34f3dfa 100644
+index 3a7c95044ee6..ad9067b3f237 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -534,6 +534,83 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
@@ -79,8 +138,9 @@ index 036db5bb182d..7ed4f34f3dfa 100644
+ */
+ goto reject_or_queue_io;
+
-+ if (queue_live ||
-+ (rq->cmd_type == REQ_TYPE_DRV_PRIV &&
++ if ((queue_live &&
++ !(nvme_req(rq)->flags & NVME_REQ_USERCMD)) ||
++ (!queue_live && rq->cmd_type == REQ_TYPE_DRV_PRIV &&
+ cmd->common.opcode == nvme_fabrics_command &&
+ cmd->fabrics.fctype == nvme_fabrics_type_connect))
+ /*
@@ -96,7 +156,6 @@ index 036db5bb182d..7ed4f34f3dfa 100644
+ return 0;
+
+ /*
-+ * q isn't live to accept the command.
+ * fall-thru to the reject_or_queue_io clause
+ */
+ break;
@@ -174,10 +233,10 @@ index 16e75a8bdd23..c2a164009da3 100644
#endif /* _NVME_FABRICS_H */
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
-index c98173f83c1e..0ff346d4c0b4 100644
+index ed1614402ea2..bb5d29c925f7 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
-@@ -2321,14 +2321,6 @@ busy:
+@@ -2298,14 +2298,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
return BLK_MQ_RQ_QUEUE_BUSY;
}
@@ -192,7 +251,7 @@ index c98173f83c1e..0ff346d4c0b4 100644
static int
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
-@@ -2344,7 +2336,9 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
+@@ -2321,7 +2313,9 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
u32 data_len;
int ret;
@@ -203,11 +262,23 @@ index c98173f83c1e..0ff346d4c0b4 100644
if (unlikely(ret))
return ret;
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 3803528b8a73..f171fedd8fa5 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -81,6 +81,7 @@ struct nvme_request {
+
+ enum {
+ NVME_REQ_CANCELLED = (1 << 0),
++ NVME_REQ_USERCMD = (1 << 1),
+ };
+
+ static inline struct nvme_request *nvme_req(struct request *req)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
-index aa3a8a5e57ff..608b0b59dde8 100644
+index 4cc51fd8acd7..e13509970e24 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
-@@ -1441,17 +1441,6 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
+@@ -1445,17 +1445,6 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
}
@@ -225,7 +296,7 @@ index aa3a8a5e57ff..608b0b59dde8 100644
static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
-@@ -1467,7 +1456,8 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+@@ -1471,7 +1460,8 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);
@@ -265,5 +336,5 @@ index 55cd87e40909..339079087da8 100644
return ret;
--
-2.13.1
+2.12.3
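
Distilled, the CONNECTING-state gate this patch adds to fabrics.c admits exactly two kinds of requests: regular (non-ioctl) commands once the queue is live, and the fabrics Connect command while it is not. A compact boolean model of that decision, under the simplifying assumption that the controller-state switch has already routed execution into this branch (connecting_state_admits is an illustrative name, not a driver function):

#include <assert.h>
#include <stdbool.h>

/* Model of the gate only; in the driver the three inputs come from
 * queue_live, nvme_req(rq)->flags and the fabrics command fields. */
static bool connecting_state_admits(bool queue_live, bool usercmd,
                                    bool fabrics_connect)
{
        if (queue_live && !usercmd)
                return true;            /* internal/normal command may proceed */
        if (!queue_live && fabrics_connect)
                return true;            /* only Connect may run before the queue is live */
        return false;                   /* everything else: reject or requeue */
}

int main(void)
{
        assert(connecting_state_admits(true, false, false));   /* keep-alive etc. */
        assert(!connecting_state_admits(true, true, false));   /* ioctl must wait */
        assert(connecting_state_admits(false, false, true));   /* Connect */
        assert(!connecting_state_admits(false, false, false)); /* plain I/O must wait */
        return 0;
}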
diff --git a/patches.kabi/nvme-reimplement-nvmf_check_if_ready-to-avoid-kabi-b.patch b/patches.kabi/nvme-reimplement-nvmf_check_if_ready-to-avoid-kabi-b.patch
new file mode 100644
index 0000000000..b80a000f2e
--- /dev/null
+++ b/patches.kabi/nvme-reimplement-nvmf_check_if_ready-to-avoid-kabi-b.patch
@@ -0,0 +1,53 @@
+From: Hannes Reinecke <hare@suse.de>
+Date: Fri, 29 Jun 2018 11:43:48 +0200
+Subject: [PATCH] nvme: reimplement nvmf_check_if_ready() to avoid kabi
+ breakage
+Patch-mainline: no, kABI fix for SLE12-SP3
+References: bsc#1098527
+
+Signed-off-by: Hannes Reinecke <hare@suse.com>
+---
+ drivers/nvme/host/fabrics.c | 13 +++++++++++++
+ drivers/nvme/host/fabrics.h | 2 ++
+ 2 files changed, 15 insertions(+)
+
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 3422687a0c31..a35b36291dbb 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -584,6 +584,19 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ }
+ EXPORT_SYMBOL_GPL(__nvmf_check_ready);
+
++int nvmf_check_if_ready(struct nvme_ctrl *ctrl,
++ struct request *rq, bool queue_live, bool is_connected)
++{
++ if (!is_connected)
++ return BLK_MQ_RQ_QUEUE_BUSY;
++ if (!nvmf_check_ready(ctrl, rq, queue_live)) {
++ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
++ return BLK_MQ_RQ_QUEUE_ERROR;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(nvmf_check_if_ready);
++
+ static const match_table_t opt_tokens = {
+ { NVMF_OPT_TRANSPORT, "transport=%s" },
+ { NVMF_OPT_TRADDR, "traddr=%s" },
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
+index 71c494bbaea2..6e98ecd312d3 100644
+--- a/drivers/nvme/host/fabrics.h
++++ b/drivers/nvme/host/fabrics.h
+@@ -162,6 +162,8 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
+ const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl);
+ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
+ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
++int nvmf_check_if_ready(struct nvme_ctrl *ctrl,
++ struct request *rq, bool queue_live, bool is_connected);
+ int nvmf_fail_nonready_command(struct request *rq);
+ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live);
+--
+2.12.3
+
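
The shim exists so that transport modules built against the old ABI keep their four-argument call site while the refactored code paths use the new two-step check. A hypothetical userspace model of the preserved contract (the *_model names are invented for illustration; the return-code mapping follows the shim above):

#include <stdbool.h>
#include <stdio.h>

enum { BLK_MQ_RQ_QUEUE_OK = 0, BLK_MQ_RQ_QUEUE_BUSY, BLK_MQ_RQ_QUEUE_ERROR };

/* stand-in for the bool-returning nvmf_check_ready() */
static bool nvmf_check_ready_model(bool queue_live)
{
        return queue_live;
}

/* shape of the preserved kABI entry point: the old return-code
 * contract implemented on top of the new bool-returning check */
static int nvmf_check_if_ready_model(bool queue_live, bool is_connected)
{
        if (!is_connected)
                return BLK_MQ_RQ_QUEUE_BUSY;    /* requeue, retry later */
        if (!nvmf_check_ready_model(queue_live))
                return BLK_MQ_RQ_QUEUE_ERROR;   /* fail with ABORT_REQ status */
        return BLK_MQ_RQ_QUEUE_OK;
}

int main(void)
{
        printf("%d %d %d\n",
               nvmf_check_if_ready_model(true, true),   /* 0: dispatch */
               nvmf_check_if_ready_model(true, false),  /* 1: busy */
               nvmf_check_if_ready_model(false, true)); /* 2: error */
        return 0;
}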
diff --git a/patches.suse/x86-spectre_v1-Disable-compiler-optimizations-over-a.patch b/patches.suse/x86-spectre_v1-Disable-compiler-optimizations-over-a.patch
new file mode 100644
index 0000000000..6d12010343
--- /dev/null
+++ b/patches.suse/x86-spectre_v1-Disable-compiler-optimizations-over-a.patch
@@ -0,0 +1,79 @@
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Thu, 7 Jun 2018 09:13:48 -0700
+Subject: x86/spectre_v1: Disable compiler optimizations over
+ array_index_mask_nospec()
+Git-commit: eab6870fee877258122a042bfd99ee7908c40280
+Patch-mainline: v4.18-rc2
+References: bsc#1068032 CVE-2017-5753
+
+Mark Rutland noticed that GCC optimization passes have the potential to elide
+necessary invocations of the array_index_mask_nospec() instruction sequence,
+so mark the asm() volatile.
+
+Mark explains:
+
+"The volatile will inhibit *some* cases where the compiler could lift the
+ array_index_nospec() call out of a branch, e.g. where there are multiple
+ invocations of array_index_nospec() with the same arguments:
+
+ if (idx < foo) {
+ idx1 = array_idx_nospec(idx, foo)
+ do_something(idx1);
+ }
+
+ < some other code >
+
+ if (idx < foo) {
+ idx2 = array_idx_nospec(idx, foo);
+ do_something_else(idx2);
+ }
+
+ ... since the compiler can determine that the two invocations yield the same
+ result, and reuse the first result (likely the same register as idx was in
+ originally) for the second branch, effectively re-writing the above as:
+
+ if (idx < foo) {
+ idx = array_idx_nospec(idx, foo);
+ do_something(idx);
+ }
+
+ < some other code >
+
+ if (idx < foo) {
+ do_something_else(idx);
+ }
+
+ ... if we don't take the first branch, then speculatively take the second, we
+ lose the nospec protection.
+
+ There's more info on volatile asm in the GCC docs:
+
+ https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html#Volatile
+ "
+
+Reported-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: <stable@vger.kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Fixes: babdde2698d4 ("x86: Implement array_index_mask_nospec")
+Link: https://lkml.kernel.org/lkml/152838798950.14521.4893346294059739135.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/barrier.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -38,7 +38,7 @@ static inline unsigned long array_index_
+ {
+ unsigned long mask;
+
+- asm ("cmp %1,%2; sbb %0,%0;"
++ asm volatile ("cmp %1,%2; sbb %0,%0;"
+ :"=r" (mask)
+ :"g"(size),"r" (index)
+ :"cc");
diff --git a/patches.suse/x86-speculation-Fix-up-array_index_nospec_mask-asm-c.patch b/patches.suse/x86-speculation-Fix-up-array_index_nospec_mask-asm-c.patch
new file mode 100644
index 0000000000..3c0bede4c9
--- /dev/null
+++ b/patches.suse/x86-speculation-Fix-up-array_index_nospec_mask-asm-c.patch
@@ -0,0 +1,33 @@
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 6 Feb 2018 18:22:40 -0800
+Subject: x86/speculation: Fix up array_index_nospec_mask() asm constraint
+Git-commit: be3233fbfcb8f5acb6e3bcd0895c3ef9e100d470
+Patch-mainline: v4.16-rc2
+References: bsc#1068032 CVE-2017-5753
+
+Allow the compiler to handle @size as an immediate value or memory
+directly rather than allocating a register.
+
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/151797010204.1289.1510000292250184993.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/barrier.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -40,7 +40,7 @@ static inline unsigned long array_index_
+
+ asm ("cmp %1,%2; sbb %0,%0;"
+ :"=r" (mask)
+- :"r"(size),"r" (index)
++ :"g"(size),"r" (index)
+ :"cc");
+ return mask;
+ }
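
The constraint change is purely about operand encoding: "r" forces @size into a general-purpose register, while "g" also permits a memory operand or, for a compile-time constant bound, an immediate. A standalone x86-64 sketch of the case the relaxation helps (mask_nospec and clamp16 are illustrative names; the asm is shown with the volatile from the later patch in this series already applied):

static inline unsigned long mask_nospec(unsigned long index,
                                        unsigned long size)
{
        unsigned long mask;

        asm volatile ("cmp %1,%2; sbb %0,%0;"
                      : "=r" (mask)
                      : "g" (size), "r" (index)
                      : "cc");
        return mask;
}

unsigned long clamp16(unsigned long idx)
{
        /* with "g", GCC can emit "cmp $16,%rdi" directly; the old "r"
         * constraint first materialized 16 in a scratch register */
        return idx & mask_nospec(idx, 16);
}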
diff --git a/series.conf b/series.conf
index 3f52f0320a..8ca382cb8e 100644
--- a/series.conf
+++ b/series.conf
@@ -3979,6 +3979,8 @@
########################################################
# x86_64/i386 biarch
########################################################
+ patches.suse/x86-speculation-Fix-up-array_index_nospec_mask-asm-c.patch
+ patches.suse/x86-spectre_v1-Disable-compiler-optimizations-over-a.patch
# fate#321909: Add KNM model
patches.arch/x86-cpu-intel-add-knights-mill-to-intel-family.patch
# Upstream commit c8b5db7de66b75330a96f9f1ad7376b89646c953
@@ -18730,6 +18732,20 @@
patches.drivers/nvme-target-fix-buffer-overflow.patch
patches.drivers/nvme-pci-Fix-EEH-failure-on-ppc.patch
patches.fixes/block-cancel-workqueue-entries-on-blk_mq_freeze_queu.patch
+ patches.drivers/nvme-fc-remove-setting-DNR-on-exception-conditions.patch
+ patches.drivers/nvme-fabrics-centralize-discovery-controller-default.patch
+ patches.drivers/nvme-fabrics-allow-duplicate-connections-to-the-disc.patch
+ patches.drivers/nvmet-fc-increase-LS-buffer-count-per-fc-port.patch
+ patches.drivers/nvme-allow-duplicate-controller-if-prior-controller-.patch
+ patches.drivers/nvme-fc-change-controllers-first-connect-to-use-reco.patch
+ patches.drivers/nvme-fc-remove-reinit_request-routine.patch
+ patches.drivers/nvme-fc-fix-nulling-of-queue-data-on-reconnect.patch
+ patches.drivers/nvme-fabrics-allow-internal-passthrough-command-on-d.patch
+ patches.drivers/nvme-fabrics-refactor-queue-ready-check.patch
+ patches.drivers/nvme-fabrics-fix-and-refine-state-checks-in-__nvmf_c.patch
+ patches.drivers/nvme-move-init-of-keep_alive-work-item-to-controller.patch
+ patches.kabi/nvme-reimplement-nvmf_check_if_ready-to-avoid-kabi-b.patch
+ patches.drivers/nvmet-switch-loopback-target-state-to-connecting-whe.patch
# bsc#1060985
patches.drivers/scsi-sd-Remove-LBPRZ-dependency-for-discards.patch
@@ -23611,6 +23627,9 @@
patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch
patches.suse/0001-kvm-Fix-nopvspin-static-branch-init-usage.patch
+ # bsc#1097931
+ patches.drivers/scsi-qedi-Fix-truncation-of-CHAP-name-and-secret.patch
+
# SSB
patches.suse/00-arch-introduce-post-init-read-only-memory.patch
patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch