Home Home > GIT Browse
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJiri Kosina <jkosina@suse.cz>2018-03-16 14:07:09 +0100
committerJiri Kosina <jkosina@suse.cz>2018-03-16 14:07:22 +0100
commitf93f788c11e6dde5a125c7618afe9f9ee418f800 (patch)
tree91ed59e175500e746605deb0c8174f09fab0cef6
parent075d4ac5bfe826fe8f866b1b0410512bcffd8d2a (diff)
parent2a6691d40319b93e01d7233122867617448a5ed5 (diff)
Merge remote-tracking branch 'origin/users/tbogendoerfer/SLE15/for-next' into SLE15
Pull IB/RDMA fixes from Thomas Bogendoerfer Conflicts: series.conf
-rw-r--r--patches.drivers/IB-core-Init-subsys-if-compiled-to-vmlinuz-core.patch36
-rw-r--r--patches.drivers/IB-opa_vnic-Properly-clear-Mac-Table-Digest.patch28
-rw-r--r--patches.drivers/IB-opa_vnic-Properly-return-the-total-MACs-in-UC-MAC.patch51
-rw-r--r--patches.drivers/IB-srpt-Disable-RDMA-access-by-the-initiator.patch31
-rw-r--r--patches.drivers/Ib-hfi1-Return-actual-operational-VLs-in-port-info-q.patch41
-rw-r--r--patches.drivers/RDMA-cma-Avoid-triggering-undefined-behavior.patch75
-rw-r--r--patches.drivers/RDMA-cma-Make-sure-that-PSN-is-not-over-max-allowed.patch31
-rw-r--r--patches.drivers/RDMA-rxe-Fix-a-race-condition-in-rxe_requester.patch73
-rw-r--r--patches.drivers/RDMA-rxe-Fix-a-race-condition-related-to-the-QP-erro.patch44
-rw-r--r--patches.drivers/RDMA-rxe-Fix-rxe_qp_cleanup.patch90
-rw-r--r--patches.drivers/i40e-Use-smp_rmb-rather-than-read_barrier_depends.patch47
-rw-r--r--patches.drivers/i40e-don-t-remove-netdev-dev_addr-when-syncing-uc-li.patch94
-rw-r--r--patches.drivers/i40e-fix-client-notify-of-VF-reset.patch42
-rw-r--r--patches.drivers/i40e-i40evf-Account-for-frags-split-over-multiple-de.patch119
-rw-r--r--patches.drivers/i40e-i40evf-spread-CPU-affinity-hints-across-online-.patch125
-rw-r--r--patches.drivers/i40e-use-the-safe-hash-table-iterator-when-deleting-.patch41
-rw-r--r--patches.drivers/i40evf-Use-smp_rmb-rather-than-read_barrier_depends.patch35
-rw-r--r--patches.drivers/scsi-csiostor-add-support-for-32-bit-port-capabiliti.patch634
-rw-r--r--series.conf24
19 files changed, 1658 insertions, 3 deletions
diff --git a/patches.drivers/IB-core-Init-subsys-if-compiled-to-vmlinuz-core.patch b/patches.drivers/IB-core-Init-subsys-if-compiled-to-vmlinuz-core.patch
new file mode 100644
index 0000000000..ed146369fb
--- /dev/null
+++ b/patches.drivers/IB-core-Init-subsys-if-compiled-to-vmlinuz-core.patch
@@ -0,0 +1,36 @@
+From: Dmitry Monakhov <dmonakhov@openvz.org>
+Date: Mon, 27 Nov 2017 13:39:05 +0000
+Subject: IB/core: Init subsys if compiled to vmlinuz-core
+Patch-mainline: v4.15-rc3
+Git-commit: a9cd1a673737dd81332fce1145801bfacfb90d90
+References: bsc#1046306 FATE#322942
+
+Once infiniband is compiled as a core component its subsystem must be
+enabled before device initialization. Otherwise there is a NULL pointer
+dereference during mlx4_core init, calltrace:
+->device_add
+ if (dev->class) {
+ deref dev->class->p =>NULLPTR
+
+#Config
+CONFIG_NET_DEVLINK=y
+CONFIG_MAY_USE_DEVLINK=y
+CONFIG_MLX4_EN=y
+
+Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
+Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/core/device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -1253,5 +1253,5 @@ static void __exit ib_core_cleanup(void)
+
+ MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
+
+-module_init(ib_core_init);
++subsys_initcall(ib_core_init);
+ module_exit(ib_core_cleanup);
diff --git a/patches.drivers/IB-opa_vnic-Properly-clear-Mac-Table-Digest.patch b/patches.drivers/IB-opa_vnic-Properly-clear-Mac-Table-Digest.patch
new file mode 100644
index 0000000000..933a043b5c
--- /dev/null
+++ b/patches.drivers/IB-opa_vnic-Properly-clear-Mac-Table-Digest.patch
@@ -0,0 +1,28 @@
+From: Scott Franco <safranco@intel.com>
+Date: Tue, 26 Sep 2017 06:44:13 -0700
+Subject: IB/opa_vnic: Properly clear Mac Table Digest
+Patch-mainline: v4.15-rc1
+Git-commit: 4bbdfe25600c1909c26747d0b5c39fd0e409bb87
+References: bsc#1060463 FATE#323043
+
+Clear the MAC table digest when the MAC table is freed.
+
+Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
+Signed-off-by: Scott Franco <safranco@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
++++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+@@ -139,6 +139,7 @@ void opa_vnic_release_mac_tbl(struct opa
+ rcu_assign_pointer(adapter->mactbl, NULL);
+ synchronize_rcu();
+ opa_vnic_free_mac_tbl(mactbl);
++ adapter->info.vport.mac_tbl_digest = 0;
+ mutex_unlock(&adapter->mactbl_lock);
+ }
+
diff --git a/patches.drivers/IB-opa_vnic-Properly-return-the-total-MACs-in-UC-MAC.patch b/patches.drivers/IB-opa_vnic-Properly-return-the-total-MACs-in-UC-MAC.patch
new file mode 100644
index 0000000000..9f2845ff0f
--- /dev/null
+++ b/patches.drivers/IB-opa_vnic-Properly-return-the-total-MACs-in-UC-MAC.patch
@@ -0,0 +1,51 @@
+From: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
+Date: Tue, 26 Sep 2017 06:44:07 -0700
+Subject: IB/opa_vnic: Properly return the total MACs in UC MAC list
+Patch-mainline: v4.15-rc1
+Git-commit: b77eb45e0d9c324245d165656ab3b38b6f386436
+References: bsc#1060463 FATE#323043
+
+Do not include EM specified MAC address in total MACs of the
+UC MAC list.
+
+Reviewed-by: Sudeep Dutt <sudeep.dutt@intel.com>
+Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
++++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
+@@ -348,7 +348,7 @@ void opa_vnic_query_mcast_macs(struct op
+ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
+ struct opa_veswport_iface_macs *macs)
+ {
+- u16 start_idx, tot_macs, num_macs, idx = 0, count = 0;
++ u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0;
+ struct netdev_hw_addr *ha;
+
+ start_idx = be16_to_cpu(macs->start_idx);
+@@ -359,8 +359,10 @@ void opa_vnic_query_ucast_macs(struct op
+
+ /* Do not include EM specified MAC address */
+ if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr,
+- ARRAY_SIZE(adapter->info.vport.base_mac_addr)))
++ ARRAY_SIZE(adapter->info.vport.base_mac_addr))) {
++ em_macs++;
+ continue;
++ }
+
+ if (start_idx > idx++)
+ continue;
+@@ -383,7 +385,7 @@ void opa_vnic_query_ucast_macs(struct op
+ }
+
+ tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) +
+- netdev_uc_count(adapter->netdev);
++ netdev_uc_count(adapter->netdev) - em_macs;
+ macs->tot_macs_in_lst = cpu_to_be16(tot_macs);
+ macs->num_macs_in_msg = cpu_to_be16(count);
+ macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count);
diff --git a/patches.drivers/IB-srpt-Disable-RDMA-access-by-the-initiator.patch b/patches.drivers/IB-srpt-Disable-RDMA-access-by-the-initiator.patch
new file mode 100644
index 0000000000..31a9d19592
--- /dev/null
+++ b/patches.drivers/IB-srpt-Disable-RDMA-access-by-the-initiator.patch
@@ -0,0 +1,31 @@
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Wed, 3 Jan 2018 13:39:15 -0800
+Subject: IB/srpt: Disable RDMA access by the initiator
+Patch-mainline: v4.15-rc8
+Git-commit: bec40c26041de61162f7be9d2ce548c756ce0f65
+References: bsc#1046306 FATE#322942
+
+With the SRP protocol all RDMA operations are initiated by the target.
+Since no RDMA operations are initiated by the initiator, do not grant
+the initiator permission to submit RDMA reads or writes to the target.
+
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/ulp/srpt/ib_srpt.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1000,8 +1000,7 @@ static int srpt_init_ch_qp(struct srpt_r
+ return -ENOMEM;
+
+ attr->qp_state = IB_QPS_INIT;
+- attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
+- IB_ACCESS_REMOTE_WRITE;
++ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+ attr->port_num = ch->sport->port;
+ attr->pkey_index = 0;
+
diff --git a/patches.drivers/Ib-hfi1-Return-actual-operational-VLs-in-port-info-q.patch b/patches.drivers/Ib-hfi1-Return-actual-operational-VLs-in-port-info-q.patch
new file mode 100644
index 0000000000..0b6d9c7bc4
--- /dev/null
+++ b/patches.drivers/Ib-hfi1-Return-actual-operational-VLs-in-port-info-q.patch
@@ -0,0 +1,41 @@
+From: Patel Jay P <jay.p.patel@intel.com>
+Date: Mon, 23 Oct 2017 06:05:53 -0700
+Subject: Ib/hfi1: Return actual operational VLs in port info query
+Patch-mainline: v4.15-rc1
+Git-commit: 00f9203119dd2774564407c7a67b17d81916298b
+References: bsc#1060463 FATE#323043
+
+__subn_get_opa_portinfo stores value returned by hfi1_get_ib_cfg() as
+operational vls. hfi1_get_ib_cfg() returns vls_operational field in
+hfi1_pportdata. The problem with this is that the value is always equal
+to vls_supported field in hfi1_pportdata.
+
+The logic to calculate operational_vls is to set value passed by FM
+(in __subn_set_opa_portinfo routine). If no value is passed then
+default value is stored in operational_vls.
+
+Field actual_vls_operational is calculated on the basis of buffer
+control table. Hence, modifying hfi1_get_ib_cfg() to return
+actual_operational_vls when used with HFI1_IB_CFG_OP_VLS parameter
+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Patel Jay P <jay.p.patel@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/chip.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -9981,7 +9981,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdat
+ goto unimplemented;
+
+ case HFI1_IB_CFG_OP_VLS:
+- val = ppd->vls_operational;
++ val = ppd->actual_vls_operational;
+ break;
+ case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
+ val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
diff --git a/patches.drivers/RDMA-cma-Avoid-triggering-undefined-behavior.patch b/patches.drivers/RDMA-cma-Avoid-triggering-undefined-behavior.patch
new file mode 100644
index 0000000000..4c88e46851
--- /dev/null
+++ b/patches.drivers/RDMA-cma-Avoid-triggering-undefined-behavior.patch
@@ -0,0 +1,75 @@
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Wed, 11 Oct 2017 10:48:45 -0700
+Subject: RDMA/cma: Avoid triggering undefined behavior
+Patch-mainline: v4.15-rc1
+Git-commit: c0b64f58e8d49570aa9ee55d880f92c20ff0166b
+References: bsc#1046306 FATE#322942
+
+According to the C standard the behavior of computations with
+integer operands is as follows:
+* A computation involving unsigned operands can never overflow,
+ because a result that cannot be represented by the resulting
+ unsigned integer type is reduced modulo the number that is one
+ greater than the largest value that can be represented by the
+ resulting type.
+* The behavior for signed integer underflow and overflow is
+ undefined.
+
+Hence only use unsigned integers when checking for integer
+overflow.
+
+This patch is what I came up with after having analyzed the
+following smatch warnings:
+
+drivers/infiniband/core/cma.c:3448: cma_resolve_ib_udp() warn: signed overflow undefined. 'offset + conn_param->private_data_len < conn_param->private_data_len'
+drivers/infiniband/core/cma.c:3505: cma_connect_ib() warn: signed overflow undefined. 'offset + conn_param->private_data_len < conn_param->private_data_len'
+
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Acked-by: Sean Hefty <sean.hefty@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/core/cma.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1540,7 +1540,7 @@ static struct rdma_id_private *cma_id_fr
+ return id_priv;
+ }
+
+-static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
++static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
+ {
+ return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
+ }
+@@ -1942,7 +1942,8 @@ static int cma_req_handler(struct ib_cm_
+ struct rdma_id_private *listen_id, *conn_id = NULL;
+ struct rdma_cm_event event;
+ struct net_device *net_dev;
+- int offset, ret;
++ u8 offset;
++ int ret;
+
+ listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
+ if (IS_ERR(listen_id))
+@@ -3440,7 +3441,8 @@ static int cma_resolve_ib_udp(struct rdm
+ struct ib_cm_sidr_req_param req;
+ struct ib_cm_id *id;
+ void *private_data;
+- int offset, ret;
++ u8 offset;
++ int ret;
+
+ memset(&req, 0, sizeof req);
+ offset = cma_user_data_offset(id_priv);
+@@ -3497,7 +3499,8 @@ static int cma_connect_ib(struct rdma_id
+ struct rdma_route *route;
+ void *private_data;
+ struct ib_cm_id *id;
+- int offset, ret;
++ u8 offset;
++ int ret;
+
+ memset(&req, 0, sizeof req);
+ offset = cma_user_data_offset(id_priv);
diff --git a/patches.drivers/RDMA-cma-Make-sure-that-PSN-is-not-over-max-allowed.patch b/patches.drivers/RDMA-cma-Make-sure-that-PSN-is-not-over-max-allowed.patch
new file mode 100644
index 0000000000..40e4665642
--- /dev/null
+++ b/patches.drivers/RDMA-cma-Make-sure-that-PSN-is-not-over-max-allowed.patch
@@ -0,0 +1,31 @@
+From: Moni Shoua <monis@mellanox.com>
+Date: Sun, 26 Nov 2017 20:23:54 +0200
+Subject: RDMA/cma: Make sure that PSN is not over max allowed
+Patch-mainline: v4.15-rc3
+Git-commit: 23a9cd2ad90543e9da3786878d2b2729c095439d
+References: bsc#1046306 FATE#322942
+
+This patch limits the initial value for PSN to 24 bits as
+spec requires.
+
+Signed-off-by: Moni Shoua <monis@mellanox.com>
+Signed-off-by: Mukesh Kacker <mukesh.kacker@oracle.com>
+Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
+Reviewed-by: Parav Pandit <parav@mellanox.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/core/cma.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -801,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct
+ INIT_LIST_HEAD(&id_priv->mc_list);
+ get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
+ id_priv->id.route.addr.dev_addr.net = get_net(net);
++ id_priv->seq_num &= 0x00ffffff;
+
+ return &id_priv->id;
+ }
diff --git a/patches.drivers/RDMA-rxe-Fix-a-race-condition-in-rxe_requester.patch b/patches.drivers/RDMA-rxe-Fix-a-race-condition-in-rxe_requester.patch
new file mode 100644
index 0000000000..a6f803c221
--- /dev/null
+++ b/patches.drivers/RDMA-rxe-Fix-a-race-condition-in-rxe_requester.patch
@@ -0,0 +1,73 @@
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Fri, 12 Jan 2018 15:11:58 -0800
+Subject: RDMA/rxe: Fix a race condition in rxe_requester()
+Patch-mainline: v4.16-rc1
+Git-commit: 65567e41219888feec72fee1de98ccf1efbbc16d
+References: bsc#1050662 FATE#323951
+
+The rxe driver works as follows:
+* The send queue, receive queue and completion queues are implemented as
+ circular buffers.
+* ib_post_send() and ib_post_recv() calls are serialized through a spinlock.
+* Removing elements from various queues happens from tasklet
+ context. Tasklets are guaranteed to run on at most one CPU. This serializes
+ access to these queues. See also rxe_completer(), rxe_requester() and
+ rxe_responder().
+* rxe_completer() processes the skbs queued onto qp->resp_pkts.
+* rxe_requester() handles the send queue (qp->sq.queue).
+* rxe_responder() processes the skbs queued onto qp->req_pkts.
+
+Since rxe_drain_req_pkts() processes qp->req_pkts, calling
+rxe_drain_req_pkts() from rxe_requester() is racy. Hence this patch.
+
+Reported-by: Moni Shoua <monis@mellanox.com>
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rxe/rxe_loc.h | 1 -
+ drivers/infiniband/sw/rxe/rxe_req.c | 9 +--------
+ drivers/infiniband/sw/rxe/rxe_resp.c | 2 +-
+ 3 files changed, 2 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/sw/rxe/rxe_loc.h
++++ b/drivers/infiniband/sw/rxe/rxe_loc.h
+@@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rx
+
+ void rxe_release(struct kref *kref);
+
+-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
+ int rxe_completer(void *arg);
+ int rxe_requester(void *arg);
+ int rxe_responder(void *arg);
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -594,15 +594,8 @@ int rxe_requester(void *arg)
+ rxe_add_ref(qp);
+
+ next_wqe:
+- if (unlikely(!qp->valid)) {
+- rxe_drain_req_pkts(qp, true);
++ if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
+ goto exit;
+- }
+-
+- if (unlikely(qp->req.state == QP_STATE_ERROR)) {
+- rxe_drain_req_pkts(qp, true);
+- goto exit;
+- }
+
+ if (unlikely(qp->req.state == QP_STATE_RESET)) {
+ qp->req.wqe_index = consumer_index(qp->sq.queue);
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -1210,7 +1210,7 @@ static enum resp_states do_class_d1e_err
+ }
+ }
+
+-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
++static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
+ {
+ struct sk_buff *skb;
+
diff --git a/patches.drivers/RDMA-rxe-Fix-a-race-condition-related-to-the-QP-erro.patch b/patches.drivers/RDMA-rxe-Fix-a-race-condition-related-to-the-QP-erro.patch
new file mode 100644
index 0000000000..350fb94a34
--- /dev/null
+++ b/patches.drivers/RDMA-rxe-Fix-a-race-condition-related-to-the-QP-erro.patch
@@ -0,0 +1,44 @@
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Tue, 9 Jan 2018 11:23:40 -0800
+Subject: RDMA/rxe: Fix a race condition related to the QP error state
+Patch-mainline: v4.16-rc1
+Git-commit: 6f301e06de4cf9ab7303f5acd43e64fcd4aa04be
+References: bsc#1050662 FATE#323951
+
+The following sequence:
+* Change queue pair state into IB_QPS_ERR.
+* Post a work request on the queue pair.
+
+Triggers the following race condition in the rdma_rxe driver:
+* rxe_qp_error() triggers an asynchronous call of rxe_completer(), the function
+ that examines the QP send queue.
+* rxe_post_send() posts a work request on the QP send queue.
+
+If rxe_completer() runs prior to rxe_post_send(), it will drain the send
+queue and the driver will assume no further action is necessary.
+However, once we post the send to the send queue, because the queue is
+in error, no send completion will ever happen and the send will get
+stuck. In order to process the send, we need to make sure that
+rxe_completer() gets run after a send is posted to a queue pair in an
+error state. This patch ensures that happens.
+
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: Moni Shoua <monis@mellanox.com>
+Cc: <stable@vger.kernel.org> # v4.8
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rxe/rxe_verbs.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -813,6 +813,8 @@ static int rxe_post_send_kernel(struct r
+ (queue_count(qp->sq.queue) > 1);
+
+ rxe_run_task(&qp->req.task, must_sched);
++ if (unlikely(qp->req.state == QP_STATE_ERROR))
++ rxe_run_task(&qp->comp.task, 1);
+
+ return err;
+ }
diff --git a/patches.drivers/RDMA-rxe-Fix-rxe_qp_cleanup.patch b/patches.drivers/RDMA-rxe-Fix-rxe_qp_cleanup.patch
new file mode 100644
index 0000000000..8f3256f024
--- /dev/null
+++ b/patches.drivers/RDMA-rxe-Fix-rxe_qp_cleanup.patch
@@ -0,0 +1,90 @@
+From: Bart Van Assche <bart.vanassche@wdc.com>
+Date: Fri, 12 Jan 2018 15:11:59 -0800
+Subject: RDMA/rxe: Fix rxe_qp_cleanup()
+Patch-mainline: v4.16-rc1
+Git-commit: bb3ffb7ad48a21e98a5c64eb21103a74fd9f03f6
+References: bsc#1050662 FATE#323951
+
+rxe_qp_cleanup() can sleep so it must be run in thread context and
+not in atomic context. This patch avoids that the following bug is
+triggered:
+
+Kernel BUG at 00000000560033f3 [verbose debug info unavailable]
+BUG: sleeping function called from invalid context at net/core/sock.c:2761
+in_atomic(): 1, irqs_disabled(): 0, pid: 7, name: ksoftirqd/0
+INFO: lockdep is turned off.
+Preemption disabled at:
+[<00000000b6e69628>] __do_softirq+0x4e/0x540
+CPU: 0 PID: 7 Comm: ksoftirqd/0 Not tainted 4.15.0-rc7-dbg+ #4
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.0.0-prebuilt.qemu-project.org 04/01/2014
+Call Trace:
+ dump_stack+0x85/0xbf
+ ___might_sleep+0x177/0x260
+ lock_sock_nested+0x1d/0x90
+ inet_shutdown+0x2e/0xd0
+ rxe_qp_cleanup+0x107/0x140 [rdma_rxe]
+ rxe_elem_release+0x18/0x80 [rdma_rxe]
+ rxe_requester+0x1cf/0x11b0 [rdma_rxe]
+ rxe_do_task+0x78/0xf0 [rdma_rxe]
+ tasklet_action+0x99/0x270
+ __do_softirq+0xc0/0x540
+ run_ksoftirqd+0x1c/0x70
+ smpboot_thread_fn+0x1be/0x270
+ kthread+0x117/0x130
+ ret_from_fork+0x24/0x30
+
+Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
+Cc: Moni Shoua <monis@mellanox.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rxe/rxe_qp.c | 12 ++++++++++--
+ drivers/infiniband/sw/rxe/rxe_verbs.h | 3 +++
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp)
+ }
+
+ /* called when the last reference to the qp is dropped */
+-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
++static void rxe_qp_do_cleanup(struct work_struct *work)
+ {
+- struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
++ struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
+
+ rxe_drop_all_mcast_groups(qp);
+
+@@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entr
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
+ }
++
++/* called when the last reference to the qp is dropped */
++void rxe_qp_cleanup(struct rxe_pool_entry *arg)
++{
++ struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
++
++ execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
++}
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
+@@ -35,6 +35,7 @@
+ #define RXE_VERBS_H
+
+ #include <linux/interrupt.h>
++#include <linux/workqueue.h>
+ #include <rdma/rdma_user_rxe.h>
+ #include "rxe_pool.h"
+ #include "rxe_task.h"
+@@ -281,6 +282,8 @@ struct rxe_qp {
+ struct timer_list rnr_nak_timer;
+
+ spinlock_t state_lock; /* guard requester and completer */
++
++ struct execute_work cleanup_work;
+ };
+
+ enum rxe_mem_state {
diff --git a/patches.drivers/i40e-Use-smp_rmb-rather-than-read_barrier_depends.patch b/patches.drivers/i40e-Use-smp_rmb-rather-than-read_barrier_depends.patch
new file mode 100644
index 0000000000..7009e365f4
--- /dev/null
+++ b/patches.drivers/i40e-Use-smp_rmb-rather-than-read_barrier_depends.patch
@@ -0,0 +1,47 @@
+From: Brian King <brking@linux.vnet.ibm.com>
+Date: Fri, 17 Nov 2017 11:05:44 -0600
+Subject: i40e: Use smp_rmb rather than read_barrier_depends
+Patch-mainline: v4.15-rc1
+Git-commit: 52c6912fde0133981ee50ba08808f257829c4c93
+References: bsc#1056658 FATE#322188 bsc#1056662 FATE#322186
+
+The original issue being fixed in this patch was seen with the ixgbe
+driver, but the same issue exists with i40e as well, as the code is
+very similar. read_barrier_depends is not sufficient to ensure
+loads following it are not speculatively loaded out of order
+by the CPU, which can result in stale data being loaded, causing
+potential system crashes.
+
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
+Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3766,7 +3766,7 @@ static bool i40e_clean_fdir_tx_irq(struc
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+- read_barrier_depends();
++ smp_rmb();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+- read_barrier_depends();
++ smp_rmb();
+
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* we have caught up to head, no work left to do */
diff --git a/patches.drivers/i40e-don-t-remove-netdev-dev_addr-when-syncing-uc-li.patch b/patches.drivers/i40e-don-t-remove-netdev-dev_addr-when-syncing-uc-li.patch
new file mode 100644
index 0000000000..390d9f9784
--- /dev/null
+++ b/patches.drivers/i40e-don-t-remove-netdev-dev_addr-when-syncing-uc-li.patch
@@ -0,0 +1,94 @@
+From: Jacob Keller <jacob.e.keller@intel.com>
+Date: Wed, 20 Dec 2017 11:04:36 -0500
+Subject: i40e: don't remove netdev->dev_addr when syncing uc list
+Patch-mainline: v4.15-rc8
+Git-commit: 458867b2ca0c987445c5d9adccd1642970e1ba07
+References: bsc#1056658 FATE#322188 bsc#1056662 FATE#322186
+
+In some circumstances, such as with bridging, it is possible that the
+stack will add a devices own MAC address to its unicast address list.
+
+If, later, the stack deletes this address, then the i40e driver will
+receive a request to remove this address.
+
+The driver stores its current MAC address as part of the MAC/VLAN hash
+array, since it is convenient and matches exactly how the hardware
+expects to be told which traffic to receive.
+
+This causes a problem, since for more devices, the MAC address is stored
+separately, and requests to delete a unicast address should not have the
+ability to remove the filter for the MAC address.
+
+Fix this by forcing a check on every address sync to ensure we do not
+remove the device address.
+
+There is a very narrow possibility of a race between .set_mac and
+.set_rx_mode, if we don't change netdev->dev_addr before updating our
+internal MAC list in .set_mac. This might be possible if .set_rx_mode is
+going to remove MAC "XYZ" from the list, at the same time as .set_mac
+changes our dev_addr to MAC "XYZ", we might possibly queue a delete,
+then an add in .set_mac, then queue a delete in .set_rx_mode's
+dev_uc_sync and then update netdev->dev_addr. We can avoid this by
+moving the copy into dev_addr prior to the changes to the MAC filter
+list.
+
+A similar race on the other side does not cause problems, as if we're
+changing our MAC form A to B, and we race with .set_rx_mode, it could
+queue a delete from A, we'd update our address, and allow the delete.
+This seems like a race, but in reality we're about to queue a delete of
+A anyways, so it would not cause any issues.
+
+A race in the initialization code is unlikely because the netdevice has
+not yet been fully initialized and the stack should not be adding or
+removing addresses yet.
+
+Note that we don't (yet) need similar code for the VF driver because it
+does not make use of __dev_uc_sync and __dev_mc_sync, but instead roles
+its own method for handling updates to the MAC/VLAN list, which already
+has code to protect against removal of the hardware address.
+
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1553,11 +1553,18 @@ static int i40e_set_mac(struct net_devic
+ else
+ netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+
++ /* Copy the address first, so that we avoid a possible race with
++ * .set_rx_mode(). If we copy after changing the address in the filter
++ * list, we might open ourselves to a narrow race window where
++ * .set_rx_mode could delete our dev_addr filter and prevent traffic
++ * from passing.
++ */
++ ether_addr_copy(netdev->dev_addr, addr->sa_data);
++
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ i40e_del_mac_filter(vsi, netdev->dev_addr);
+ i40e_add_mac_filter(vsi, addr->sa_data);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ if (vsi->type == I40E_VSI_MAIN) {
+ i40e_status ret;
+
+@@ -1739,6 +1746,14 @@ static int i40e_addr_unsync(struct net_d
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
++ /* Under some circumstances, we might receive a request to delete
++ * our own device address from our uc list. Because we store the
++ * device address in the VSI's MAC/VLAN filter list, we need to ignore
++ * such requests and not delete our device address from this list.
++ */
++ if (ether_addr_equal(addr, netdev->dev_addr))
++ return 0;
++
+ i40e_del_mac_filter(vsi, addr);
+
+ return 0;
diff --git a/patches.drivers/i40e-fix-client-notify-of-VF-reset.patch b/patches.drivers/i40e-fix-client-notify-of-VF-reset.patch
new file mode 100644
index 0000000000..e3c1a2b7bf
--- /dev/null
+++ b/patches.drivers/i40e-fix-client-notify-of-VF-reset.patch
@@ -0,0 +1,42 @@
+From: Alan Brady <alan.brady@intel.com>
+Date: Tue, 22 Aug 2017 06:57:53 -0400
+Subject: i40e: fix client notify of VF reset
+Patch-mainline: v4.15-rc1
+Git-commit: c53d11f669c0e7d0daf46a717b6712ad0b09de99
+References: bsc#1056658 FATE#322188 bsc#1056662 FATE#322186
+
+Currently there is a bug in which the PF driver fails to inform clients
+of a VF reset which then causes clients to leak resources. The bug
+exists because we were incorrectly checking the I40E_VF_STATE_PRE_ENABLE
+bit.
+
+When a VF is first init we go through a reset to initialize variables
+and allocate resources but we don't want to inform clients of this first
+reset since the client isn't fully enabled yet so we set a state bit
+signifying we're in a "pre-enabled" client state. During the first
+reset we should be clearing the bit, allowing all following resets to
+notify the client of the reset when the bit is not set. This patch
+fixes the issue by negating the 'test_and_clear_bit' check to accurately
+reflect the behavior we want.
+
+Signed-off-by: Alan Brady <alan.brady@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1008,8 +1008,8 @@ static void i40e_cleanup_reset_vf(struct
+ set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ /* Do not notify the client during VF init */
+- if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
+- &vf->vf_states))
++ if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
++ &vf->vf_states))
+ i40e_notify_client_of_vf_reset(pf, abs_vf_id);
+ vf->num_vlan = 0;
+ }
diff --git a/patches.drivers/i40e-i40evf-Account-for-frags-split-over-multiple-de.patch b/patches.drivers/i40e-i40evf-Account-for-frags-split-over-multiple-de.patch
new file mode 100644
index 0000000000..73abbc8320
--- /dev/null
+++ b/patches.drivers/i40e-i40evf-Account-for-frags-split-over-multiple-de.patch
@@ -0,0 +1,119 @@
+From: Alexander Duyck <alexander.h.duyck@intel.com>
+Date: Fri, 8 Dec 2017 10:55:04 -0800
+Subject: i40e/i40evf: Account for frags split over multiple descriptors in
+ check linearize
+Patch-mainline: v4.15-rc8
+Git-commit: 248de22e638f10bd5bfc7624a357f940f66ba137
+References: bsc#1056658 FATE#322188 bsc#1056662 FATE#322186
+
+The original code for __i40e_chk_linearize didn't take into account the
+fact that if a fragment is 16K in size or larger it has to be split over 2
+descriptors and the smaller of those 2 descriptors will be on the trailing
+edge of the transmit. As a result we can get into situations where we didn't
+catch requests that could result in a Tx hang.
+
+This patch takes care of that by subtracting the length of all but the
+trailing edge of the stale fragment before we test for sum. By doing this
+we can guarantee that we have all cases covered, including the case of a
+fragment that spans multiple descriptors. We don't need to worry about
+checking the inner portions of this since 12K is the maximum aligned DMA
+size and that is larger than any MSS will ever be since the MTU limit for
+jumbos is something on the order of 9K.
+
+Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 26 +++++++++++++++++++++++---
+ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 26 +++++++++++++++++++++++---
+ 2 files changed, 46 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -3048,10 +3048,30 @@ bool __i40e_chk_linearize(struct sk_buff
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+- stale = &skb_shinfo(skb)->frags[0];
+- for (;;) {
++ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
++ int stale_size = skb_frag_size(stale);
++
+ sum += skb_frag_size(frag++);
+
++ /* The stale fragment may present us with a smaller
++ * descriptor than the actual fragment size. To account
++ * for that we need to remove all the data on the front and
++ * figure out what the remainder would be in the last
++ * descriptor associated with the fragment.
++ */
++ if (stale_size > I40E_MAX_DATA_PER_TXD) {
++ int align_pad = -(stale->page_offset) &
++ (I40E_MAX_READ_REQ_SIZE - 1);
++
++ sum -= align_pad;
++ stale_size -= align_pad;
++
++ do {
++ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
++ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
++ } while (stale_size > I40E_MAX_DATA_PER_TXD);
++ }
++
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+@@ -3059,7 +3079,7 @@ bool __i40e_chk_linearize(struct sk_buff
+ if (!nr_frags--)
+ break;
+
+- sum -= skb_frag_size(stale++);
++ sum -= stale_size;
+ }
+
+ return false;
+--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+@@ -2014,10 +2014,30 @@ bool __i40evf_chk_linearize(struct sk_bu
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+- stale = &skb_shinfo(skb)->frags[0];
+- for (;;) {
++ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
++ int stale_size = skb_frag_size(stale);
++
+ sum += skb_frag_size(frag++);
+
++ /* The stale fragment may present us with a smaller
++ * descriptor than the actual fragment size. To account
++ * for that we need to remove all the data on the front and
++ * figure out what the remainder would be in the last
++ * descriptor associated with the fragment.
++ */
++ if (stale_size > I40E_MAX_DATA_PER_TXD) {
++ int align_pad = -(stale->page_offset) &
++ (I40E_MAX_READ_REQ_SIZE - 1);
++
++ sum -= align_pad;
++ stale_size -= align_pad;
++
++ do {
++ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
++ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
++ } while (stale_size > I40E_MAX_DATA_PER_TXD);
++ }
++
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+@@ -2025,7 +2045,7 @@ bool __i40evf_chk_linearize(struct sk_bu
+ if (!nr_frags--)
+ break;
+
+- sum -= skb_frag_size(stale++);
++ sum -= stale_size;
+ }
+
+ return false;
diff --git a/patches.drivers/i40e-i40evf-spread-CPU-affinity-hints-across-online-.patch b/patches.drivers/i40e-i40evf-spread-CPU-affinity-hints-across-online-.patch
new file mode 100644
index 0000000000..bca23bb597
--- /dev/null
+++ b/patches.drivers/i40e-i40evf-spread-CPU-affinity-hints-across-online-.patch
@@ -0,0 +1,125 @@
+From: Jacob Keller <jacob.e.keller@intel.com>
+Date: Tue, 29 Aug 2017 05:32:31 -0400
+Subject: i40e/i40evf: spread CPU affinity hints across online CPUs only
+Patch-mainline: v4.15-rc1
+Git-commit: be664cbefc50977aaefc868ba6a1109ec9b7449d
+References: bsc#1056658 FATE#322188 bsc#1056662 FATE#322186
+
+Currently, when setting up the IRQ for a q_vector, we set an affinity
+hint based on the v_idx of that q_vector. Meaning a loop iterates on
+v_idx, which is an incremental value, and the cpumask is created based
+on this value.
+
+This is a problem in systems with multiple logical CPUs per core (like in
+simultaneous multithreading (SMT) scenarios). If we disable some logical
+CPUs, by turning SMT off for example, we will end up with a sparse
+cpu_online_mask, i.e., only the first CPU in a core is online, and
+incremental filling in q_vector cpumask might lead to multiple offline
+CPUs being assigned to q_vectors.
+
+Example: if we have a system with 8 cores each one containing 8 logical
+CPUs (SMT == 8 in this case), we have 64 CPUs in total. But if SMT is
+disabled, only the 1st CPU in each core remains online, so the
+cpu_online_mask in this case would have only 8 bits set, in a sparse way.
+
+In general case, when SMT is off the cpu_online_mask has only C bits set:
+0, 1*N, 2*N, ..., (C-1)*N where
+C == # of cores;
+N == # of logical CPUs per core.
+In our example, only bits 0, 8, 16, 24, 32, 40, 48, 56 would be set.
+
+Instead, we should only assign hints for CPUs which are online. Even
+better, the kernel already provides a function, cpumask_local_spread()
+which takes an index and returns a CPU, spreading the interrupts across
+local NUMA nodes first, and then remote ones if necessary.
+
+Since we generally have a 1:1 mapping between vectors and CPUs, there
+is no real advantage to spreading vectors to local CPUs first. In order
+to avoid mismatch of the default XPS hints, we'll pass -1 so that it
+spreads across all CPUs without regard to the node locality.
+
+Note that we don't need to change the q_vector->affinity_mask as this is
+initialized to cpu_possible_mask, until an actual affinity is set and
+then notified back to us.
+
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 16 +++++++++++-----
+ drivers/net/ethernet/intel/i40evf/i40evf_main.c | 9 ++++++---
+ 2 files changed, 17 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2874,14 +2874,15 @@ static void i40e_vsi_free_rx_resources(s
+ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
+ {
+ struct i40e_vsi *vsi = ring->vsi;
++ int cpu;
+
+ if (!ring->q_vector || !ring->netdev)
+ return;
+
+ if ((vsi->tc_config.numtc <= 1) &&
+ !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
+- netif_set_xps_queue(ring->netdev,
+- get_cpu_mask(ring->q_vector->v_idx),
++ cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
++ netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
+ ring->queue_index);
+ }
+
+@@ -3471,6 +3472,7 @@ static int i40e_vsi_request_irq_msix(str
+ int tx_int_idx = 0;
+ int vector, err;
+ int irq_num;
++ int cpu;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
+@@ -3506,10 +3508,14 @@ static int i40e_vsi_request_irq_msix(str
+ q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
+ q_vector->affinity_notify.release = i40e_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+- /* get_cpu_mask returns a static constant mask with
+- * a permanent lifetime so it's ok to use here.
++ /* Spread affinity hints out across online CPUs.
++ *
++ * get_cpu_mask returns a static constant mask with
++ * a permanent lifetime so it's ok to pass to
++ * irq_set_affinity_hint without making a copy.
+ */
+- irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
++ cpu = cpumask_local_spread(q_vector->v_idx, -1);
++ irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ }
+
+ vsi->irqs_ready = true;
+--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
++++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+@@ -546,6 +546,7 @@ i40evf_request_traffic_irqs(struct i40ev
+ unsigned int vector, q_vectors;
+ unsigned int rx_int_idx = 0, tx_int_idx = 0;
+ int irq_num, err;
++ int cpu;
+
+ i40evf_irq_disable(adapter);
+ /* Decrement for Other and TCP Timer vectors */
+@@ -584,10 +585,12 @@ i40evf_request_traffic_irqs(struct i40ev
+ q_vector->affinity_notify.release =
+ i40evf_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+- /* get_cpu_mask returns a static constant mask with
+- * a permanent lifetime so it's ok to use here.
++ /* Spread the IRQ affinity hints across online CPUs. Note that
++ * get_cpu_mask returns a mask with a permanent lifetime so
++ * it's safe to use as a hint for irq_set_affinity_hint.
+ */
+- irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
++ cpu = cpumask_local_spread(q_vector->v_idx, -1);
++ irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ }
+
+ return 0;
diff --git a/patches.drivers/i40e-use-the-safe-hash-table-iterator-when-deleting-.patch b/patches.drivers/i40e-use-the-safe-hash-table-iterator-when-deleting-.patch
new file mode 100644
index 0000000000..95e6950bba
--- /dev/null
+++ b/patches.drivers/i40e-use-the-safe-hash-table-iterator-when-deleting-.patch
@@ -0,0 +1,41 @@
+From: Lihong Yang <lihong.yang@intel.com>
+Date: Thu, 7 Sep 2017 08:05:46 -0400
+Subject: i40e: use the safe hash table iterator when deleting mac filters
+Patch-mainline: v4.15-rc1
+Git-commit: 784548c40d6f43eff2297220ad7800dc04be03c6
+References: bsc#1056658 FATE#322188 bsc#1056662 FATE#322186
+
+This patch replaces hash_for_each function with hash_for_each_safe
+when calling __i40e_del_filter. The hash_for_each_safe function is
+the right one to use when iterating over a hash table to safely remove
+a hash entry. Otherwise, incorrect values may be read from freed memory.
+
+Detected by CoverityScan, CID 1402048 Read from pointer after free
+
+Signed-off-by: Lihong Yang <lihong.yang@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2779,6 +2779,7 @@ int i40e_ndo_set_vf_mac(struct net_devic
+ struct i40e_mac_filter *f;
+ struct i40e_vf *vf;
+ int ret = 0;
++ struct hlist_node *h;
+ int bkt;
+
+ /* validate the request */
+@@ -2817,7 +2818,7 @@ int i40e_ndo_set_vf_mac(struct net_devic
+ /* Delete all the filters for this VSI - we're going to kill it
+ * anyway.
+ */
+- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
++ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ __i40e_del_filter(vsi, f);
+
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
diff --git a/patches.drivers/i40evf-Use-smp_rmb-rather-than-read_barrier_depends.patch b/patches.drivers/i40evf-Use-smp_rmb-rather-than-read_barrier_depends.patch
new file mode 100644
index 0000000000..8266406e78
--- /dev/null
+++ b/patches.drivers/i40evf-Use-smp_rmb-rather-than-read_barrier_depends.patch
@@ -0,0 +1,35 @@
+From: Brian King <brking@linux.vnet.ibm.com>
+Date: Fri, 17 Nov 2017 11:05:49 -0600
+Subject: i40evf: Use smp_rmb rather than read_barrier_depends
+Patch-mainline: v4.15-rc1
+Git-commit: f72271e2a0ae4277d53c4053f5eed8bb346ba38a
+References: bsc#1056658 FATE#322188 bsc#1056662 FATE#322186
+
+The original issue being fixed in this patch was seen with the ixgbe
+driver, but the same issue exists with i40evf as well, as the code is
+very similar. read_barrier_depends is not sufficient to ensure
+loads following it are not speculatively loaded out of order
+by the CPU, which can result in stale data being loaded, causing
+potential system crashes.
+
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
+Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+@@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+- read_barrier_depends();
++ smp_rmb();
+
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* if the descriptor isn't done, no work yet to do */
diff --git a/patches.drivers/scsi-csiostor-add-support-for-32-bit-port-capabiliti.patch b/patches.drivers/scsi-csiostor-add-support-for-32-bit-port-capabiliti.patch
new file mode 100644
index 0000000000..273981befd
--- /dev/null
+++ b/patches.drivers/scsi-csiostor-add-support-for-32-bit-port-capabiliti.patch
@@ -0,0 +1,634 @@
+From: Varun Prakash <varun@chelsio.com>
+Date: Sun, 11 Mar 2018 18:02:13 +0530
+Subject: scsi: csiostor: add support for 32 bit port capabilities
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
+Git-commit: e1735d9a98ab5593484bbba1933e362a261e0de0
+References: bsc#1084463
+
+32 bit port capabilities are required to support new speeds which can
+not be supported using 16 bit port capabilities.
+
+Signed-off-by: Varun Prakash <varun@chelsio.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/scsi/csiostor/csio_attr.c | 16 +-
+ drivers/scsi/csiostor/csio_hw.c | 275 ++++++++++++++++++++++++++++++++++++-
+ drivers/scsi/csiostor/csio_hw.h | 59 +++++++
+ drivers/scsi/csiostor/csio_lnode.c | 8 +
+ drivers/scsi/csiostor/csio_mb.c | 70 +++++----
+ drivers/scsi/csiostor/csio_mb.h | 9 -
+ 6 files changed, 395 insertions(+), 42 deletions(-)
+
+--- a/drivers/scsi/csiostor/csio_attr.c
++++ b/drivers/scsi/csiostor/csio_attr.c
+@@ -274,12 +274,24 @@ csio_get_host_speed(struct Scsi_Host *sh
+
+ spin_lock_irq(&hw->lock);
+ switch (hw->pport[ln->portid].link_speed) {
+- case FW_PORT_CAP_SPEED_1G:
++ case FW_PORT_CAP32_SPEED_1G:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+- case FW_PORT_CAP_SPEED_10G:
++ case FW_PORT_CAP32_SPEED_10G:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
++ case FW_PORT_CAP32_SPEED_25G:
++ fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
++ break;
++ case FW_PORT_CAP32_SPEED_40G:
++ fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
++ break;
++ case FW_PORT_CAP32_SPEED_50G:
++ fc_host_speed(shost) = FC_PORTSPEED_50GBIT;
++ break;
++ case FW_PORT_CAP32_SPEED_100G:
++ fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
++ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+--- a/drivers/scsi/csiostor/csio_hw.c
++++ b/drivers/scsi/csiostor/csio_hw.c
+@@ -1409,6 +1409,235 @@ out:
+ return rv;
+ }
+
++static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
++{
++ enum cc_fec cc_fec = 0;
++
++ if (fw_fec & FW_PORT_CAP32_FEC_RS)
++ cc_fec |= FEC_RS;
++ if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
++ cc_fec |= FEC_BASER_RS;
++
++ return cc_fec;
++}
++
++static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
++{
++ fw_port_cap32_t fw_pause = 0;
++
++ if (cc_pause & PAUSE_RX)
++ fw_pause |= FW_PORT_CAP32_FC_RX;
++ if (cc_pause & PAUSE_TX)
++ fw_pause |= FW_PORT_CAP32_FC_TX;
++
++ return fw_pause;
++}
++
++static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
++{
++ fw_port_cap32_t fw_fec = 0;
++
++ if (cc_fec & FEC_RS)
++ fw_fec |= FW_PORT_CAP32_FEC_RS;
++ if (cc_fec & FEC_BASER_RS)
++ fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
++
++ return fw_fec;
++}
++
++/**
++ * fwcap_to_fwspeed - return highest speed in Port Capabilities
++ * @acaps: advertised Port Capabilities
++ *
++ * Get the highest speed for the port from the advertised Port
++ * Capabilities.
++ */
++fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
++{
++ #define TEST_SPEED_RETURN(__caps_speed) \
++ do { \
++ if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
++ return FW_PORT_CAP32_SPEED_##__caps_speed; \
++ } while (0)
++
++ TEST_SPEED_RETURN(400G);
++ TEST_SPEED_RETURN(200G);
++ TEST_SPEED_RETURN(100G);
++ TEST_SPEED_RETURN(50G);
++ TEST_SPEED_RETURN(40G);
++ TEST_SPEED_RETURN(25G);
++ TEST_SPEED_RETURN(10G);
++ TEST_SPEED_RETURN(1G);
++ TEST_SPEED_RETURN(100M);
++
++ #undef TEST_SPEED_RETURN
++
++ return 0;
++}
++
++/**
++ * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
++ * @caps16: a 16-bit Port Capabilities value
++ *
++ * Returns the equivalent 32-bit Port Capabilities value.
++ */
++fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
++{
++ fw_port_cap32_t caps32 = 0;
++
++ #define CAP16_TO_CAP32(__cap) \
++ do { \
++ if (caps16 & FW_PORT_CAP_##__cap) \
++ caps32 |= FW_PORT_CAP32_##__cap; \
++ } while (0)
++
++ CAP16_TO_CAP32(SPEED_100M);
++ CAP16_TO_CAP32(SPEED_1G);
++ CAP16_TO_CAP32(SPEED_25G);
++ CAP16_TO_CAP32(SPEED_10G);
++ CAP16_TO_CAP32(SPEED_40G);
++ CAP16_TO_CAP32(SPEED_100G);
++ CAP16_TO_CAP32(FC_RX);
++ CAP16_TO_CAP32(FC_TX);
++ CAP16_TO_CAP32(ANEG);
++ CAP16_TO_CAP32(MDIX);
++ CAP16_TO_CAP32(MDIAUTO);
++ CAP16_TO_CAP32(FEC_RS);
++ CAP16_TO_CAP32(FEC_BASER_RS);
++ CAP16_TO_CAP32(802_3_PAUSE);
++ CAP16_TO_CAP32(802_3_ASM_DIR);
++
++ #undef CAP16_TO_CAP32
++
++ return caps32;
++}
++
++/**
++ * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
++ * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
++ *
++ * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
++ * 32-bit Port Capabilities value.
++ */
++fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
++{
++ fw_port_cap32_t linkattr = 0;
++
++ /* The format of the Link Status in the old
++ * 16-bit Port Information message isn't the same as the
++ * 16-bit Port Capabilities bitfield used everywhere else.
++ */
++ if (lstatus & FW_PORT_CMD_RXPAUSE_F)
++ linkattr |= FW_PORT_CAP32_FC_RX;
++ if (lstatus & FW_PORT_CMD_TXPAUSE_F)
++ linkattr |= FW_PORT_CAP32_FC_TX;
++ if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
++ linkattr |= FW_PORT_CAP32_SPEED_100M;
++ if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
++ linkattr |= FW_PORT_CAP32_SPEED_1G;
++ if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
++ linkattr |= FW_PORT_CAP32_SPEED_10G;
++ if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
++ linkattr |= FW_PORT_CAP32_SPEED_25G;
++ if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
++ linkattr |= FW_PORT_CAP32_SPEED_40G;
++ if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
++ linkattr |= FW_PORT_CAP32_SPEED_100G;
++
++ return linkattr;
++}
++
++/**
++ * csio_init_link_config - initialize a link's SW state
++ * @lc: pointer to structure holding the link state
++ * @pcaps: link Port Capabilities
++ * @acaps: link current Advertised Port Capabilities
++ *
++ * Initializes the SW state maintained for each link, including the link's
++ * capabilities and default speed/flow-control/autonegotiation settings.
++ */
++static void csio_init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
++ fw_port_cap32_t acaps)
++{
++ lc->pcaps = pcaps;
++ lc->def_acaps = acaps;
++ lc->lpacaps = 0;
++ lc->speed_caps = 0;
++ lc->speed = 0;
++ lc->requested_fc = PAUSE_RX | PAUSE_TX;
++ lc->fc = lc->requested_fc;
++
++ /*
++ * For Forward Error Control, we default to whatever the Firmware
++ * tells us the Link is currently advertising.
++ */
++ lc->requested_fec = FEC_AUTO;
++ lc->fec = fwcap_to_cc_fec(lc->def_acaps);
++
++	/* If the Port is capable of Auto-Negotiation, initialize it as
++ * "enabled" and copy over all of the Physical Port Capabilities
++ * to the Advertised Port Capabilities. Otherwise mark it as
++ * Auto-Negotiate disabled and select the highest supported speed
++ * for the link. Note parallel structure in t4_link_l1cfg_core()
++ * and t4_handle_get_port_info().
++ */
++ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
++ lc->acaps = lc->pcaps & ADVERT_MASK;
++ lc->autoneg = AUTONEG_ENABLE;
++ lc->requested_fc |= PAUSE_AUTONEG;
++ } else {
++ lc->acaps = 0;
++ lc->autoneg = AUTONEG_DISABLE;
++ }
++}
++
++static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
++ uint32_t *rcaps)
++{
++ unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
++ fw_port_cap32_t fw_fc, cc_fec, fw_fec, lrcap;
++
++ lc->link_ok = 0;
++
++ /*
++ * Convert driver coding of Pause Frame Flow Control settings into the
++ * Firmware's API.
++ */
++ fw_fc = cc_to_fwcap_pause(lc->requested_fc);
++
++ /*
++ * Convert Common Code Forward Error Control settings into the
++ * Firmware's API. If the current Requested FEC has "Automatic"
++ * (IEEE 802.3) specified, then we use whatever the Firmware
++	 * sent us as part of its IEEE 802.3-based interpretation of
++ * the Transceiver Module EPROM FEC parameters. Otherwise we
++ * use whatever is in the current Requested FEC settings.
++ */
++ if (lc->requested_fec & FEC_AUTO)
++ cc_fec = fwcap_to_cc_fec(lc->def_acaps);
++ else
++ cc_fec = lc->requested_fec;
++ fw_fec = cc_to_fwcap_fec(cc_fec);
++
++ /* Figure out what our Requested Port Capabilities are going to be.
++ * Note parallel structure in t4_handle_get_port_info() and
++ * init_link_config().
++ */
++ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
++ lrcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
++ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
++ lc->fec = cc_fec;
++ } else if (lc->autoneg == AUTONEG_DISABLE) {
++ lrcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
++ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
++ lc->fec = cc_fec;
++ } else {
++ lrcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
++ }
++
++ *rcaps = lrcap;
++}
++
+ /*
+ * csio_enable_ports - Bring up all available ports.
+ * @hw: HW module.
+@@ -1418,8 +1647,10 @@ static int
+ csio_enable_ports(struct csio_hw *hw)
+ {
+ struct csio_mb *mbp;
++ u16 fw_caps = FW_CAPS_UNKNOWN;
+ enum fw_retval retval;
+ uint8_t portid;
++ fw_port_cap32_t pcaps, acaps, rcaps;
+ int i;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+@@ -1431,9 +1662,39 @@ csio_enable_ports(struct csio_hw *hw)
+ for (i = 0; i < hw->num_pports; i++) {
+ portid = hw->pport[i].portid;
+
++ if (fw_caps == FW_CAPS_UNKNOWN) {
++ u32 param, val;
++
++ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
++ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
++ val = 1;
++
++ csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
++ hw->pfn, 0, 1, &param, &val, false,
++ NULL);
++
++ if (csio_mb_issue(hw, mbp)) {
++ csio_err(hw, "failed to issue FW_PARAMS_CMD(r) port:%d\n",
++ portid);
++ mempool_free(mbp, hw->mb_mempool);
++ return -EINVAL;
++ }
++
++ csio_mb_process_read_params_rsp(hw, mbp, &retval, 1,
++ &val);
++ if (retval != FW_SUCCESS) {
++ csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",
++ portid, retval);
++ mempool_free(mbp, hw->mb_mempool);
++ return -EINVAL;
++ }
++
++ fw_caps = val;
++ }
++
+ /* Read PORT information */
+ csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
+- false, 0, 0, NULL);
++ false, 0, fw_caps, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
+@@ -1442,8 +1703,8 @@ csio_enable_ports(struct csio_hw *hw)
+ return -EINVAL;
+ }
+
+- csio_mb_process_read_port_rsp(hw, mbp, &retval,
+- &hw->pport[i].pcap);
++ csio_mb_process_read_port_rsp(hw, mbp, &retval, fw_caps,
++ &pcaps, &acaps);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
+ portid, retval);
+@@ -1451,9 +1712,13 @@ csio_enable_ports(struct csio_hw *hw)
+ return -EINVAL;
+ }
+
++ csio_init_link_config(&hw->pport[i].link_cfg, pcaps, acaps);
++
++ csio_link_l1cfg(&hw->pport[i].link_cfg, fw_caps, &rcaps);
++
+ /* Write back PORT information */
+- csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
+- (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);
++ csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
++ true, rcaps, fw_caps, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
+--- a/drivers/scsi/csiostor/csio_hw.h
++++ b/drivers/scsi/csiostor/csio_hw.h
+@@ -268,8 +268,62 @@ struct csio_vpd {
+ uint8_t id[ID_LEN + 1];
+ };
+
++/* Firmware Port Capabilities types. */
++
++typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */
++typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */
++
++enum fw_caps {
++ FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */
++ FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */
++ FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */
++};
++
++enum cc_pause {
++ PAUSE_RX = 1 << 0,
++ PAUSE_TX = 1 << 1,
++ PAUSE_AUTONEG = 1 << 2
++};
++
++enum cc_fec {
++ FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */
++ FEC_RS = 1 << 1, /* Reed-Solomon */
++ FEC_BASER_RS = 1 << 2 /* BaseR/Reed-Solomon */
++};
++
++struct link_config {
++ fw_port_cap32_t pcaps; /* link capabilities */
++ fw_port_cap32_t def_acaps; /* default advertised capabilities */
++ fw_port_cap32_t acaps; /* advertised capabilities */
++ fw_port_cap32_t lpacaps; /* peer advertised capabilities */
++
++ fw_port_cap32_t speed_caps; /* speed(s) user has requested */
++ unsigned int speed; /* actual link speed (Mb/s) */
++
++ enum cc_pause requested_fc; /* flow control user has requested */
++ enum cc_pause fc; /* actual link flow control */
++
++ enum cc_fec requested_fec; /* Forward Error Correction: */
++ enum cc_fec fec; /* requested and actual in use */
++
++ unsigned char autoneg; /* autonegotiating? */
++
++ unsigned char link_ok; /* link up? */
++ unsigned char link_down_rc; /* link down reason */
++};
++
++#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
++
++#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
++ FW_PORT_CAP32_ANEG)
++
++/* Enable or disable autonegotiation. */
++#define AUTONEG_DISABLE 0x00
++#define AUTONEG_ENABLE 0x01
++
+ struct csio_pport {
+ uint16_t pcap;
++ uint16_t acap;
+ uint8_t portid;
+ uint8_t link_status;
+ uint16_t link_speed;
+@@ -278,6 +332,7 @@ struct csio_pport {
+ uint8_t rsvd1;
+ uint8_t rsvd2;
+ uint8_t rsvd3;
++ struct link_config link_cfg;
+ };
+
+ /* fcoe resource information */
+@@ -582,6 +637,10 @@ int csio_hw_slow_intr_handler(struct csi
+ int csio_handle_intr_status(struct csio_hw *, unsigned int,
+ const struct intr_info *);
+
++fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps);
++fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
++fw_port_cap32_t lstatus_to_fwcap(u32 lstatus);
++
+ int csio_hw_start(struct csio_hw *);
+ int csio_hw_stop(struct csio_hw *);
+ int csio_hw_reset(struct csio_hw *);
+--- a/drivers/scsi/csiostor/csio_lnode.c
++++ b/drivers/scsi/csiostor/csio_lnode.c
+@@ -352,6 +352,14 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *h
+ val = htonl(FC_PORTSPEED_1GBIT);
+ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
+ val = htonl(FC_PORTSPEED_10GBIT);
++ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_25G)
++ val = htonl(FC_PORTSPEED_25GBIT);
++ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_40G)
++ val = htonl(FC_PORTSPEED_40GBIT);
++ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_50G)
++ val = htonl(FC_PORTSPEED_50GBIT);
++ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_100G)
++ val = htonl(FC_PORTSPEED_100GBIT);
+ else
+ val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
+--- a/drivers/scsi/csiostor/csio_mb.c
++++ b/drivers/scsi/csiostor/csio_mb.c
+@@ -326,10 +326,6 @@ csio_mb_caps_config(struct csio_hw *hw,
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
+ }
+
+-#define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G |\
+- FW_PORT_CAP_ANEG)
+-
+ /*
+ * csio_mb_port- FW PORT command helper
+ * @hw: The HW structure
+@@ -344,11 +340,10 @@ csio_mb_caps_config(struct csio_hw *hw,
+ */
+ void
+ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+- uint8_t portid, bool wr, uint32_t fc, uint16_t caps,
++ u8 portid, bool wr, uint32_t fc, uint16_t fw_caps,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+ {
+ struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
+- unsigned int lfc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+@@ -358,26 +353,24 @@ csio_mb_port(struct csio_hw *hw, struct
+ FW_PORT_CMD_PORTID_V(portid));
+ if (!wr) {
+ cmdp->action_to_len16 = htonl(
+- FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
++ FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
++ ? FW_PORT_ACTION_GET_PORT_INFO
++ : FW_PORT_ACTION_GET_PORT_INFO32) |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+ return;
+ }
+
+ /* Set port */
+ cmdp->action_to_len16 = htonl(
+- FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
++ FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
++ ? FW_PORT_ACTION_L1_CFG
++ : FW_PORT_ACTION_L1_CFG32) |
+ FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
+
+- if (fc & PAUSE_RX)
+- lfc |= FW_PORT_CAP_FC_RX;
+- if (fc & PAUSE_TX)
+- lfc |= FW_PORT_CAP_FC_TX;
+-
+- if (!(caps & FW_PORT_CAP_ANEG))
+- cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);
++ if (fw_caps == FW_CAPS16)
++ cmdp->u.l1cfg.rcap = cpu_to_be32(fc);
+ else
+- cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |
+- lfc | mdi);
++ cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc);
+ }
+
+ /*
+@@ -390,14 +383,22 @@ csio_mb_port(struct csio_hw *hw, struct
+ */
+ void
+ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+- enum fw_retval *retval, uint16_t *caps)
++ enum fw_retval *retval, uint16_t fw_caps,
++ u32 *pcaps, u32 *acaps)
+ {
+ struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
+
+ *retval = FW_CMD_RETVAL_G(ntohl(rsp->action_to_len16));
+
+- if (*retval == FW_SUCCESS)
+- *caps = ntohs(rsp->u.info.pcap);
++ if (*retval == FW_SUCCESS) {
++ if (fw_caps == FW_CAPS16) {
++ *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap));
++ *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap));
++ } else {
++ *pcaps = ntohs(rsp->u.info32.pcaps32);
++ *acaps = ntohs(rsp->u.info32.acaps32);
++ }
++ }
+ }
+
+ /*
+@@ -1409,6 +1410,7 @@ csio_mb_fwevt_handler(struct csio_hw *hw
+ uint32_t link_status;
+ uint16_t action;
+ uint8_t mod_type;
++ fw_port_cap32_t linkattr;
+
+ if (opcode == FW_PORT_CMD) {
+ pcmd = (struct fw_port_cmd *)cmd;
+@@ -1416,22 +1418,34 @@ csio_mb_fwevt_handler(struct csio_hw *hw
+ ntohl(pcmd->op_to_portid));
+ action = FW_PORT_CMD_ACTION_G(
+ ntohl(pcmd->action_to_len16));
+- if (action != FW_PORT_ACTION_GET_PORT_INFO) {
++ if (action != FW_PORT_ACTION_GET_PORT_INFO &&
++ action != FW_PORT_ACTION_GET_PORT_INFO32) {
+ csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
+ action);
+ return -EINVAL;
+ }
+
+- link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
+- mod_type = FW_PORT_CMD_MODTYPE_G(link_status);
++ if (action == FW_PORT_ACTION_GET_PORT_INFO) {
++ link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
++ mod_type = FW_PORT_CMD_MODTYPE_G(link_status);
++ linkattr = lstatus_to_fwcap(link_status);
++
++ hw->pport[port_id].link_status =
++ FW_PORT_CMD_LSTATUS_G(link_status);
++ } else {
++ link_status =
++ ntohl(pcmd->u.info32.lstatus32_to_cbllen32);
++ mod_type = FW_PORT_CMD_MODTYPE32_G(link_status);
++ linkattr = ntohl(pcmd->u.info32.linkattr32);
++
++ hw->pport[port_id].link_status =
++ FW_PORT_CMD_LSTATUS32_G(link_status);
++ }
+
+- hw->pport[port_id].link_status =
+- FW_PORT_CMD_LSTATUS_G(link_status);
+- hw->pport[port_id].link_speed =
+- FW_PORT_CMD_LSPEED_G(link_status);
++ hw->pport[port_id].link_speed = fwcap_to_fwspeed(linkattr);
+
+ csio_info(hw, "Port:%x - LINK %s\n", port_id,
+- FW_PORT_CMD_LSTATUS_G(link_status) ? "UP" : "DOWN");
++ hw->pport[port_id].link_status ? "UP" : "DOWN");
+
+ if (mod_type != hw->pport[port_id].mod_type) {
+ hw->pport[port_id].mod_type = mod_type;
+--- a/drivers/scsi/csiostor/csio_mb.h
++++ b/drivers/scsi/csiostor/csio_mb.h
+@@ -88,12 +88,6 @@ enum csio_dev_state {
+ FW_PARAMS_PARAM_Y_V(0) | \
+ FW_PARAMS_PARAM_Z_V(0))
+
+-enum {
+- PAUSE_RX = 1 << 0,
+- PAUSE_TX = 1 << 1,
+- PAUSE_AUTONEG = 1 << 2
+-};
+-
+ #define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \
+ do { \
+ if (__clear) \
+@@ -188,7 +182,8 @@ void csio_mb_port(struct csio_hw *, stru
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+ void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *,
+- enum fw_retval *, uint16_t *);
++ enum fw_retval *, uint16_t,
++ uint32_t *, uint32_t *);
+
+ void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
diff --git a/series.conf b/series.conf
index 44c40f7180..4444ef316b 100644
--- a/series.conf
+++ b/series.conf
@@ -7513,11 +7513,13 @@
patches.drivers/0001-thunderbolt-Add-polling-mode-for-rings.patch
patches.drivers/0001-thunderbolt-Allocate-ring-HopID-automatically-if-req.patch
patches.drivers/fm10k-ensure-we-process-SM-mbx-when-processing-VF-mb.patch
+ patches.drivers/i40e-fix-client-notify-of-VF-reset.patch
patches.drivers/cxgb4-Update-comment-for-min_mtu.patch
patches.drivers/fm10k-fix-mis-ordered-parameters-in-declaration-for-.patch
patches.drivers/cxgb4-add-new-T6-pci-device-id-s-acd669a8.patch
patches.drivers/cxgb4vf-make-a-couple-of-functions-static.patch
patches.drivers/i40e-add-private-flag-to-control-source-pruning.patch
+ patches.drivers/i40e-i40evf-spread-CPU-affinity-hints-across-online-.patch
patches.drivers/bnxt_en-don-t-consider-building-bnxt_tc.o-if-option-.patch
patches.suse/msft-hv-1477-hv_netvsc-Change-the-hash-level-variable-to-bit-flag.patch
patches.suse/msft-hv-1478-hv_netvsc-Add-ethtool-handler-to-set-and-get-TCP-has.patch
@@ -7540,6 +7542,7 @@
patches.drivers/net-mlx4_core-Fix-cast-warning-in-fw.c.patch
patches.drivers/net-mlx4_en-Use-__force-to-fix-a-sparse-warning-in-T.patch
patches.drivers/ixgbe-fix-use-of-uninitialized-padding.patch
+ patches.drivers/i40e-use-the-safe-hash-table-iterator-when-deleting-.patch
patches.drivers/i40e-Avoid-some-useless-variables-and-initializers-i.patch
patches.fixes/bpf-encapsulate-verifier-log-state-into-a-structure.patch
patches.fixes/bpf-move-global-verifier-log-into-verifier-environme.patch
@@ -7735,6 +7738,8 @@
patches.drivers/iw_cxgb4-Remove-__func__-parameter-from-pr_debug.patch
patches.drivers/iw_cxgb4-change-pr_debug-to-appropriate-log-level.patch
patches.drivers/IB-hfi1-Extend-input-hdr-tracing-for-packet-type.patch
+ patches.drivers/IB-opa_vnic-Properly-return-the-total-MACs-in-UC-MAC.patch
+ patches.drivers/IB-opa_vnic-Properly-clear-Mac-Table-Digest.patch
patches.drivers/RDMA-qedr-Fix-rdma_type-initialization.patch
patches.drivers/iw_cxgb4-allocate-wait-object-for-each-memory-object.patch
patches.drivers/iw_cxgb4-allocate-wait-object-for-each-cq-object.patch
@@ -7755,6 +7760,7 @@
patches.drivers/IB-rxe-put-the-pool-on-allocation-failure.patch
patches.drivers/infiniband-add-MMU-dependency-for-user_mem.patch
patches.drivers/IB-core-Fix-endianness-annotation-in-rdma_is_multica.patch
+ patches.drivers/RDMA-cma-Avoid-triggering-undefined-behavior.patch
patches.drivers/RDMA-cxgb4-Fix-indentation.patch
patches.drivers/RDMA-cxgb4-Remove-the-obsolete-kernel-module-option-.patch
patches.drivers/RDMA-cxgb4-Suppress-gcc-7-fall-through-complaints.patch
@@ -7780,6 +7786,7 @@
patches.drivers/IB-cm-Fix-memory-corruption-in-handling-CM-request.patch
patches.drivers/RDMA-cxgb4-Declare-stag-as-__be32.patch
patches.drivers/bnxt_re-Implement-the-shutdown-hook-of-the-L2-RoCE-d.patch
+ patches.drivers/Ib-hfi1-Return-actual-operational-VLs-in-port-info-q.patch
patches.drivers/IB-mlx4-Add-report-for-RSS-capabilities-by-vendor-ch.patch
patches.drivers/IB-mlx4-Fix-RSS-s-QPC-attributes-assignments.patch
patches.drivers/RDMA-cxgb4-Annotate-r2-and-stag-as-__be32.patch
@@ -8607,10 +8614,12 @@
patches.fixes/bpf-fix-branch-pruning-logic.patch
patches.drivers/i40e-fix-the-calculation-of-VFs-mac-addresses.patch
patches.drivers/ixgbe-Fix-skb-list-corruption-on-Power-systems.patch
+ patches.drivers/i40e-Use-smp_rmb-rather-than-read_barrier_depends.patch
patches.drivers/ixgbevf-Use-smp_rmb-rather-than-read_barrier_depends.patch
patches.drivers/igbvf-Use-smp_rmb-rather-than-read_barrier_depends.patch
patches.drivers/igb-Use-smp_rmb-rather-than-read_barrier_depends.patch
patches.drivers/fm10k-Use-smp_rmb-rather-than-read_barrier_depends.patch
+ patches.drivers/i40evf-Use-smp_rmb-rather-than-read_barrier_depends.patch
patches.drivers/0001-iwlwifi-mvm-support-version-7-of-the-SCAN_REQ_UMAC-F.patch
patches.drivers/iwlwifi-fix-PCI-IDs-and-configuration-mapping-for-90.patch
patches.drivers/iwlwifi-fix-firmware-names-for-9000-and-A000-series-.patch
@@ -8803,6 +8812,8 @@
patches.drivers/i40iw-Correct-ARP-index-mask.patch
patches.drivers/i40iw-Move-MPA-request-event-for-loopback-after-conn.patch
patches.drivers/i40iw-Notify-user-of-established-connection-after-QP.patch
+ patches.drivers/RDMA-cma-Make-sure-that-PSN-is-not-over-max-allowed.patch
+ patches.drivers/IB-core-Init-subsys-if-compiled-to-vmlinuz-core.patch
patches.drivers/IB-core-Only-enforce-security-for-InfiniBand.patch
patches.fixes/scsi-aacraid-Check-for-PCI-state-of-device-in-a-gene.patch
patches.fixes/scsi-aacraid-Perform-initialization-reset-only-once.patch
@@ -9202,6 +9213,7 @@
patches.drivers/IB-mlx4-Fix-mlx4_ib_alloc_mr-error-flow.patch
patches.drivers/IB-ipoib-Fix-race-condition-in-neigh-creation.patch
patches.drivers/RDMA-netlink-Fix-locking-around-__ib_get_device_by_i.patch
+ patches.drivers/IB-srpt-Disable-RDMA-access-by-the-initiator.patch
patches.drivers/IB-srpt-Fix-ACL-lookup-during-login.patch
patches.drivers/net-sched-Fix-update-of-lastuse-in-act-modules-imple.patch
patches.fixes/RDS-Heap-OOB-write-in-rds_message_alloc_sgs.patch
@@ -9210,6 +9222,8 @@
patches.drivers/cxgb4-Fix-FW-flash-errors.patch
patches.drivers/net-ena-unmask-MSI-X-only-after-device-initializatio.patch
patches.drivers/net-ena-fix-error-handling-in-ena_down-sequence.patch
+ patches.drivers/i40e-i40evf-Account-for-frags-split-over-multiple-de.patch
+ patches.drivers/i40e-don-t-remove-netdev-dev_addr-when-syncing-uc-li.patch
patches.drivers/nl80211-Check-for-the-required-netlink-attribute-pre
patches.drivers/mac80211-mesh-drop-frames-appearing-to-be-from-us
patches.fixes/RDS-null-pointer-dereference-in-rds_atomic_free_op.patch
@@ -9742,6 +9756,7 @@
patches.drivers/IB-hfi1-Fix-infinite-loop-in-8051-command-error-path.patch
patches.drivers/net-mlx5-Fix-race-for-multiple-RoCE-enable.patch
patches.drivers/IB-srpt-Remove-an-unused-structure-member.patch
+ patches.drivers/RDMA-rxe-Fix-a-race-condition-related-to-the-QP-erro.patch
patches.drivers/RDMA-cma-Use-the-right-net-namespace-for-the-rdma_cm.patch
patches.drivers/RDMA-cma-Provide-a-function-to-set-RoCE-path-record-.patch
patches.drivers/RDMA-cma-ucma-Simplify-and-rename-rdma_set_ib_paths.patch
@@ -9758,6 +9773,8 @@
patches.drivers/RDMA-bnxt_re-Add-support-for-MRs-with-Huge-pages.patch
patches.drivers/RDMA-bnxt_re-expose-detailed-stats-retrieved-from-HW.patch
patches.drivers/RDMA-bnxt_re-Add-SRQ-support-for-Broadcom-adapters.patch
+ patches.drivers/RDMA-rxe-Fix-a-race-condition-in-rxe_requester.patch
+ patches.drivers/RDMA-rxe-Fix-rxe_qp_cleanup.patch
patches.drivers/RDMA-cma-Introduce-API-to-read-GIDs-for-multiple-tra.patch
patches.drivers/RDMA-cma-Check-existence-of-netdevice-during-port-va.patch
patches.drivers/IB-umad-Fix-use-of-unprotected-device-pointer.patch
@@ -10995,12 +11012,13 @@
patches.drivers/drm-i915-dp-limit-DP-link-rate-based-on-VBT-on-CNL
patches.drivers/drm-i915-cnl-WaPipeControlBefore3DStateSamplePattern
- # mkp/scsi 4.17/scsi-queue
- patches.drivers/scsi-lpfc-make-several-unions-static-fix-non-ansi.patch
-
# jj/linux-apparmor apparmor-next
patches.apparmor/apparmor-fix-resource-audit-messages-when-auditing-peer.patch
+ # mkp/scsi 4.17/scsi-queue
+ patches.drivers/scsi-lpfc-make-several-unions-static-fix-non-ansi.patch
+ patches.drivers/scsi-csiostor-add-support-for-32-bit-port-capabiliti.patch
+
# out-of-tree patches
patches.drivers/qla2xxx-Remove-unneeded-message-and-minor-cleanup-fo.patch
patches.drivers/qla2xxx-Set-IIDMA-and-fcport-state-before-qla_nvme_r.patch