author    Michal Kubecek <mkubecek@suse.cz>  2019-11-05 21:30:17 +0100
committer Michal Kubecek <mkubecek@suse.cz>  2019-11-05 21:30:17 +0100
commit    e44b1dd3a32f7093dc21747b4a486d2d609c2855 (patch)
tree      18b930dbcf6363cd0e269acbd4b2839a8b9c753b
parent    c7eada4a2549120e1c9b384a126922cbc8ebf80e (diff)
parent    6c0057f3917349accaad0515bb64b08ac280bdb5 (diff)
Merge branch 'users/tbogendoerfer/SLE15-SP2/for-next' into SLE15-SP2
Pull networking backports and fixes from Thomas Bogendoerfer.
-rw-r--r--  patches.suse/IB-core-Avoid-deadlock-during-netlink-message-handli.patch  291
-rw-r--r--  patches.suse/IB-core-Use-rdma_read_gid_l2_fields-to-compare-GID-L.patch  52
-rw-r--r--  patches.suse/IB-hfi1-Avoid-excessive-retry-for-TID-RDMA-READ-requ.patch  58
-rw-r--r--  patches.suse/IB-hfi1-Use-a-common-pad-buffer-for-9B-and-16B-packe.patch  88
-rw-r--r--  patches.suse/RDMA-hns-Prevent-memory-leaks-of-eq-buf_list.patch  35
-rw-r--r--  patches.suse/RDMA-iw_cxgb4-Avoid-freeing-skb-twice-in-arp-failure.patch  39
-rw-r--r--  patches.suse/RDMA-iwcm-move-iw_rem_ref-calls-out-of-spinlock.patch  171
-rw-r--r--  patches.suse/RDMA-mlx5-Clear-old-rate-limit-when-closing-QP.patch  40
-rw-r--r--  patches.suse/RDMA-mlx5-Use-irq-xarray-locking-for-mkey_table.patch  86
-rw-r--r--  patches.suse/RDMA-nldev-Skip-counter-if-port-doesn-t-match.patch  35
-rw-r--r--  patches.suse/RDMA-qedr-Fix-reported-firmware-version.patch  40
-rw-r--r--  patches.suse/RDMA-siw-free-siw_base_qp-in-kref-release-routine.patch  56
-rw-r--r--  patches.suse/RDMA-uverbs-Prevent-potential-underflow.patch  52
-rw-r--r--  patches.suse/SUNRPC-Destroy-the-back-channel-when-we-destroy-the-.patch  61
-rw-r--r--  patches.suse/SUNRPC-The-RDMA-back-channel-mustn-t-disappear-while.patch  39
-rw-r--r--  patches.suse/SUNRPC-The-TCP-back-channel-mustn-t-disappear-while-.patch  50
-rw-r--r--  patches.suse/bnxt_en-Adjust-the-time-to-wait-before-polling-firmw.patch  43
-rw-r--r--  patches.suse/bnxt_en-Avoid-disabling-pci-device-in-bnxt_remove_on.patch  34
-rw-r--r--  patches.suse/bnxt_en-Fix-devlink-NVRAM-related-byte-order-related.patch  136
-rw-r--r--  patches.suse/bnxt_en-Fix-the-size-of-devlink-MSIX-parameters.patch  121
-rw-r--r--  patches.suse/bnxt_en-Minor-formatting-changes-in-FW-devlink_healt.patch  54
-rw-r--r--  patches.suse/bonding-fix-unexpected-IFF_BONDING-bit-unset.patch  93
-rw-r--r--  patches.suse/bonding-fix-using-uninitialized-mode_lock.patch  106
-rw-r--r--  patches.suse/bonding-use-dynamic-lockdep-key-instead-of-subclass.patch  131
-rw-r--r--  patches.suse/bpf-lwtunnel-Fix-reroute-supplying-invalid-dst.patch  50
-rw-r--r--  patches.suse/cxgb4-fix-panic-when-attaching-to-ULD-fail.patch  90
-rw-r--r--  patches.suse/cxgb4-request-the-TX-CIDX-updates-to-status-page.patch  48
-rw-r--r--  patches.suse/e1000-fix-memory-leaks.patch  54
-rw-r--r--  patches.suse/erspan-fix-the-tun_info-options_len-check-for-erspan.patch  49
-rw-r--r--  patches.suse/i40e-Fix-receive-buffer-starvation-for-AF_XDP.patch  35
-rw-r--r--  patches.suse/igb-Enable-media-autosense-for-the-i350.patch  43
-rw-r--r--  patches.suse/igb-Fix-constant-media-auto-sense-switching-when-no-.patch  41
-rw-r--r--  patches.suse/igb-igc-Don-t-warn-on-fatal-read-failures-when-the-d.patch  114
-rw-r--r--  patches.suse/inet-stop-leaking-jiffies-on-the-wire.patch  104
-rw-r--r--  patches.suse/ip6erspan-remove-the-incorrect-mtu-limit-for-ip6ersp.patch  34
-rw-r--r--  patches.suse/ipv4-fix-IPSKB_FRAG_PMTU-handling-with-fragmentation.patch  106
-rw-r--r--  patches.suse/ipv4-fix-route-update-on-metric-change.patch  63
-rw-r--r--  patches.suse/ipv6-include-net-addrconf.h-for-missing-declarations.patch  35
-rw-r--r--  patches.suse/ipvs-don-t-ignore-errors-in-case-refcounting-ip_vs-m.patch  223
-rw-r--r--  patches.suse/ipvs-move-old_secure_tcp-into-struct-netns_ipvs.patch  108
-rw-r--r--  patches.suse/iw_cxgb4-fix-ECN-check-on-the-passive-accept.patch  66
-rw-r--r--  patches.suse/ixgbe-Remove-duplicate-clear_bit-call.patch  27
-rw-r--r--  patches.suse/keys-Fix-memory-leak-in-copy_net_ns.patch  57
-rw-r--r--  patches.suse/mlxsw-core-Unpublish-devlink-parameters-during-reloa.patch  51
-rw-r--r--  patches.suse/mlxsw-spectrum_trap-Push-Ethernet-header-before-repo.patch  35
-rw-r--r--  patches.suse/net-Zeroing-the-structure-ethtool_wolinfo-in-ethtool.patch  34
-rw-r--r--  patches.suse/net-add-READ-WRITE-_ONCE-annotations-on-rskq_accept_.patch  65
-rw-r--r--  patches.suse/net-add-READ_ONCE-annotation-in-__skb_wait_for_more_.patch  77
-rw-r--r--  patches.suse/net-add-skb_queue_empty_lockless.patch  91
-rw-r--r--  patches.suse/net-annotate-accesses-to-sk-sk_incoming_cpu.patch  156
-rw-r--r--  patches.suse/net-annotate-lockless-accesses-to-sk-sk_napi_id.patch  96
-rw-r--r--  patches.suse/net-annotate-sk-sk_rcvlowat-lockless-reads.patch  83
-rw-r--r--  patches.suse/net-avoid-possible-false-sharing-in-sk_leave_memory_.patch  41
-rw-r--r--  patches.suse/net-cavium-Use-the-correct-style-for-SPDX-License-Id.patch  32
-rw-r--r--  patches.suse/net-cls_bpf-fix-NULL-deref-on-offload-filter-removal.patch  44
-rw-r--r--  patches.suse/net-core-add-generic-lockdep-keys.patch  896
-rw-r--r--  patches.suse/net-core-add-ignore-flag-to-netdev_adjacent-structur.patch  446
-rw-r--r--  patches.suse/net-core-limit-nested-device-depth.patch  448
-rw-r--r--  patches.suse/net-ensure-correct-skb-tstamp-in-various-fragmenters.patch  138
-rw-r--r--  patches.suse/net-fix-installing-orphaned-programs.patch  46
-rw-r--r--  patches.suse/net-fix-sk_page_frag-recursion-from-memory-reclaim.patch  158
-rw-r--r--  patches.suse/net-flow_dissector-switch-to-siphash.patch  374
-rw-r--r--  patches.suse/net-hisilicon-Fix-Trying-to-free-already-free-IRQ.patch  52
-rw-r--r--  patches.suse/net-hisilicon-Fix-ping-latency-when-deal-with-high-t.patch  74
-rw-r--r--  patches.suse/net-hns3-fix-mis-counting-IRQ-vector-numbers-issue.patch  233
-rw-r--r--  patches.suse/net-mlx4_core-Dynamically-set-guaranteed-amount-of-c.patch  92
-rw-r--r--  patches.suse/net-mlx5-DR-Allow-insertion-of-duplicate-rules.patch  42
-rw-r--r--  patches.suse/net-mlx5-Fix-NULL-pointer-dereference-in-extended-de.patch  32
-rw-r--r--  patches.suse/net-mlx5-Fix-rtable-reference-leak.patch  56
-rw-r--r--  patches.suse/net-mlx5-fix-memory-leak-in-mlx5_fw_fatal_reporter_d.patch  31
-rw-r--r--  patches.suse/net-mlx5-prevent-memory-leak-in-mlx5_fpga_conn_creat.patch  32
-rw-r--r--  patches.suse/net-mlx5e-Determine-source-port-properly-for-vlan-pu.patch  64
-rw-r--r--  patches.suse/net-mlx5e-Don-t-store-direct-pointer-to-action-s-tun.patch  267
-rw-r--r--  patches.suse/net-mlx5e-Fix-ethtool-self-test-link-speed.patch  58
-rw-r--r--  patches.suse/net-mlx5e-Fix-handling-of-compressed-CQEs-in-case-of.patch  55
-rw-r--r--  patches.suse/net-mlx5e-Initialize-on-stack-link-modes-bitmap.patch  29
-rw-r--r--  patches.suse/net-mlx5e-Only-skip-encap-flows-update-when-encap-in.patch  34
-rw-r--r--  patches.suse/net-mlx5e-Remove-incorrect-match-criteria-assignment.patch  31
-rw-r--r--  patches.suse/net-mlx5e-Replace-kfree-with-kvfree-when-free-vhca-s.patch  35
-rw-r--r--  patches.suse/net-mlx5e-TX-Fix-consumer-index-of-error-cqe-dump.patch  34
-rw-r--r--  patches.suse/net-mlx5e-Tx-Fix-assumption-of-single-WQEBB-of-NOP-i.patch  50
-rw-r--r--  patches.suse/net-mlx5e-Tx-Zero-memset-WQE-info-struct-upon-update.patch  34
-rw-r--r--  patches.suse/net-mlx5e-kTLS-Enhance-TX-resync-flow.patch  247
-rw-r--r--  patches.suse/net-mlx5e-kTLS-Fix-missing-SQ-edge-fill.patch  75
-rw-r--r--  patches.suse/net-mlx5e-kTLS-Fix-page-refcnt-leak-in-TX-resync-err.patch  53
-rw-r--r--  patches.suse/net-mlx5e-kTLS-Limit-DUMP-wqe-size.patch  163
-rw-r--r--  patches.suse/net-mlx5e-kTLS-Release-reference-on-DUMPed-fragments.patch  128
-rw-r--r--  patches.suse/net-mlx5e-kTLS-Remove-unneeded-cipher-type-checks.patch  41
-rw-r--r--  patches.suse/net-mlx5e-kTLS-Save-a-copy-of-the-crypto-info.patch  78
-rw-r--r--  patches.suse/net-mlx5e-kTLS-Save-by-value-copy-of-the-record-frag.patch  54
-rw-r--r--  patches.suse/net-mlx5e-kTLS-Save-only-the-frag-page-to-release-at.patch  121
-rw-r--r--  patches.suse/net-mlx5e-kTLS-Size-of-a-Dump-WQE-is-fixed.patch  105
-rw-r--r--  patches.suse/net-netem-correct-the-parent-s-backlog-when-corrupte.patch  33
-rw-r--r--  patches.suse/net-netem-fix-error-path-for-corrupted-GSO-frames.patch  67
-rw-r--r--  patches.suse/net-phylink-Fix-phylink_dbg-macro.patch  47
-rw-r--r--  patches.suse/net-remove-unnecessary-variables-and-callback.patch  412
-rw-r--r--  patches.suse/net-reorder-struct-net-fields-to-avoid-false-sharing.patch  108
-rw-r--r--  patches.suse/net-rtnetlink-fix-a-typo-fbd-fdb.patch  30
-rw-r--r--  patches.suse/net-sch_generic-Use-pfifo_fast-as-fallback-scheduler.patch  70
-rw-r--r--  patches.suse/net-silence-KCSAN-warnings-about-sk-sk_backlog.len-r.patch  123
-rw-r--r--  patches.suse/net-silence-KCSAN-warnings-around-sk_add_backlog-cal.patch  174
-rw-r--r--  patches.suse/net-use-skb_queue_empty_lockless-in-busy-poll-contex.patch  79
-rw-r--r--  patches.suse/net-use-skb_queue_empty_lockless-in-poll-handlers.patch  246
-rw-r--r--  patches.suse/netdevsim-Fix-use-after-free-during-device-dismantle.patch  151
-rw-r--r--  patches.suse/netfilter-conntrack-avoid-possible-false-sharing.patch  88
-rw-r--r--  patches.suse/netfilter-nf_flow_table-set-timeout-before-insertion.patch  56
-rw-r--r--  patches.suse/netfilter-nft_payload-fix-missing-check-for-matching.patch  133
-rw-r--r--  patches.suse/netns-fix-GFP-flags-in-rtnl_net_notifyid.patch  284
-rw-r--r--  patches.suse/netns-fix-NLM_F_ECHO-mechanism-for-RTM_NEWNSID.patch  85
-rw-r--r--  patches.suse/qed-Optimize-execution-time-for-nvm-attributes-confi.patch  85
-rw-r--r--  patches.suse/qed-fix-spelling-mistake-queuess-queues.patch  27
-rw-r--r--  patches.suse/rxrpc-Fix-call-ref-leak.patch  44
-rw-r--r--  patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-call-re.patch  157
-rw-r--r--  patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-connect.patch  159
-rw-r--r--  patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-peer-re.patch  104
-rw-r--r--  patches.suse/rxrpc-rxrpc_peer-needs-to-hold-a-ref-on-the-rxrpc_lo.patch  67
-rw-r--r--  patches.suse/sctp-add-chunks-to-sk_backlog-when-the-newsk-sk_sock.patch  120
-rw-r--r--  patches.suse/tcp-add-rcu-protection-around-tp-fastopen_rsk.patch  275
-rw-r--r--  patches.suse/tcp-annotate-lockless-access-to-tcp_memory_pressure.patch  54
-rw-r--r--  patches.suse/tcp-annotate-sk-sk_rcvbuf-lockless-reads.patch  145
-rw-r--r--  patches.suse/tcp-annotate-sk-sk_sndbuf-lockless-reads.patch  169
-rw-r--r--  patches.suse/tcp-annotate-sk-sk_wmem_queued-lockless-reads.patch  220
-rw-r--r--  patches.suse/tcp-annotate-tp-copied_seq-lockless-reads.patch  200
-rw-r--r--  patches.suse/tcp-annotate-tp-rcv_nxt-lockless-reads.patch  184
-rw-r--r--  patches.suse/tcp-annotate-tp-snd_nxt-lockless-reads.patch  106
-rw-r--r--  patches.suse/tcp-annotate-tp-urg_seq-lockless-reads.patch  54
-rw-r--r--  patches.suse/tcp-annotate-tp-write_seq-lockless-reads.patch  238
-rw-r--r--  patches.suse/tcp-fix-a-possible-lockdep-splat-in-tcp_done.patch  40
-rw-r--r--  patches.suse/tcp-increase-tcp_max_syn_backlog-max-value.patch  55
-rw-r--r--  patches.suse/team-fix-nested-locking-lockdep-warning.patch  140
-rw-r--r--  patches.suse/udp-fix-data-race-in-udp_set_dev_scratch.patch  100
-rw-r--r--  patches.suse/udp-use-skb_queue_empty_lockless.patch  96
-rw-r--r--  patches.suse/vxlan-add-adjacent-link-to-limit-depth-level.patch  210
-rw-r--r--  patches.suse/vxlan-check-tun_info-options_len-properly.patch  36
-rw-r--r--  patches.suse/vxlan-fix-unexpected-failure-of-vxlan_changelink.patch  56
-rw-r--r--  patches.suse/xdp-Handle-device-unregister-for-devmap_hash-map-typ.patch  74
-rw-r--r--  patches.suse/xdp-Prevent-overflow-in-devmap_hash-cost-calculation.patch  37
-rw-r--r--  patches.suse/xsk-Fix-registration-of-Rx-only-sockets.patch  53
-rw-r--r--  series.conf  138
-rw-r--r--  supported.conf  4
140 files changed, 14600 insertions, 2 deletions
diff --git a/patches.suse/IB-core-Avoid-deadlock-during-netlink-message-handli.patch b/patches.suse/IB-core-Avoid-deadlock-during-netlink-message-handli.patch
new file mode 100644
index 0000000000..8c8ea44325
--- /dev/null
+++ b/patches.suse/IB-core-Avoid-deadlock-during-netlink-message-handli.patch
@@ -0,0 +1,291 @@
+From: Parav Pandit <parav@mellanox.com>
+Date: Tue, 15 Oct 2019 11:07:33 +0300
+Subject: IB/core: Avoid deadlock during netlink message handling
+Patch-mainline: v5.4-rc6
+Git-commit: 549af00833028b5803528553a4743e0cd1fdbee9
+References: jsc#SLE-8449
+
+When the rdmacm module is not loaded and a netlink message is received to
+get char device info, it results in a deadlock due to recursive locking
+of rdma_nl_mutex with the below call sequence.
+
+[..]
+ rdma_nl_rcv()
+ mutex_lock()
+ [..]
+ rdma_nl_rcv_msg()
+ ib_get_client_nl_info()
+ request_module()
+ iw_cm_init()
+ rdma_nl_register()
+ mutex_lock(); <- Deadlock, acquiring mutex again
+
+Due to the above call sequence, the following call trace and deadlock are observed.
+
+ kernel: __mutex_lock+0x35e/0x860
+ kernel: ? __mutex_lock+0x129/0x860
+ kernel: ? rdma_nl_register+0x1a/0x90 [ib_core]
+ kernel: rdma_nl_register+0x1a/0x90 [ib_core]
+ kernel: ? 0xffffffffc029b000
+ kernel: iw_cm_init+0x34/0x1000 [iw_cm]
+ kernel: do_one_initcall+0x67/0x2d4
+ kernel: ? kmem_cache_alloc_trace+0x1ec/0x2a0
+ kernel: do_init_module+0x5a/0x223
+ kernel: load_module+0x1998/0x1e10
+ kernel: ? __symbol_put+0x60/0x60
+ kernel: __do_sys_finit_module+0x94/0xe0
+ kernel: do_syscall_64+0x5a/0x270
+ kernel: entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+ process stack trace:
+ [<0>] __request_module+0x1c9/0x460
+ [<0>] ib_get_client_nl_info+0x5e/0xb0 [ib_core]
+ [<0>] nldev_get_chardev+0x1ac/0x320 [ib_core]
+ [<0>] rdma_nl_rcv_msg+0xeb/0x1d0 [ib_core]
+ [<0>] rdma_nl_rcv+0xcd/0x120 [ib_core]
+ [<0>] netlink_unicast+0x179/0x220
+ [<0>] netlink_sendmsg+0x2f6/0x3f0
+ [<0>] sock_sendmsg+0x30/0x40
+ [<0>] ___sys_sendmsg+0x27a/0x290
+ [<0>] __sys_sendmsg+0x58/0xa0
+ [<0>] do_syscall_64+0x5a/0x270
+ [<0>] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+To overcome this deadlock and to allow multiple netlink messages to
+progress in parallel, the following scheme is implemented.
+
+1. Split the lock protecting the cb_table into a per-index lock, and make
+ it a rwlock. This lock is used to ensure no callbacks are running after
+ unregistration returns. Since a module will not be registered once it
+ is already running callbacks, this avoids the deadlock.
+
+2. Use smp_store_release() to update the cb_table during registration so
+ that no lock is required. This avoids lockdep problems with thinking
+ all the rwsems are the same lock class.
+
+Fixes: 0e2d00eb6fd45 ("RDMA: Add NLDEV_GET_CHARDEV to allow char dev discovery and autoload")
+Link: https://lore.kernel.org/r/20191015080733.18625-1-leon@kernel.org
+Signed-off-by: Parav Pandit <parav@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/core/core_priv.h | 1
+ drivers/infiniband/core/device.c | 2
+ drivers/infiniband/core/netlink.c | 107 +++++++++++++++++-------------------
+ 3 files changed, 56 insertions(+), 54 deletions(-)
+
+--- a/drivers/infiniband/core/core_priv.h
++++ b/drivers/infiniband/core/core_priv.h
+@@ -199,6 +199,7 @@ void ib_mad_cleanup(void);
+ int ib_sa_init(void);
+ void ib_sa_cleanup(void);
+
++void rdma_nl_init(void);
+ void rdma_nl_exit(void);
+
+ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -2716,6 +2716,8 @@ static int __init ib_core_init(void)
+ goto err_comp_unbound;
+ }
+
++ rdma_nl_init();
++
+ ret = addr_init();
+ if (ret) {
+ pr_warn("Could't init IB address resolution\n");
+--- a/drivers/infiniband/core/netlink.c
++++ b/drivers/infiniband/core/netlink.c
+@@ -42,9 +42,12 @@
+ #include <linux/module.h>
+ #include "core_priv.h"
+
+-static DEFINE_MUTEX(rdma_nl_mutex);
+ static struct {
+- const struct rdma_nl_cbs *cb_table;
++ const struct rdma_nl_cbs *cb_table;
++ /* Synchronizes between ongoing netlink commands and netlink client
++ * unregistration.
++ */
++ struct rw_semaphore sem;
+ } rdma_nl_types[RDMA_NL_NUM_CLIENTS];
+
+ bool rdma_nl_chk_listeners(unsigned int group)
+@@ -75,70 +78,53 @@ static bool is_nl_msg_valid(unsigned int
+ return (op < max_num_ops[type]) ? true : false;
+ }
+
+-static bool
+-is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
++static const struct rdma_nl_cbs *
++get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
+ {
+ const struct rdma_nl_cbs *cb_table;
+
+- if (!is_nl_msg_valid(type, op))
+- return false;
+-
+ /*
+ * Currently only NLDEV client is supporting netlink commands in
+ * non init_net net namespace.
+ */
+ if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
+- return false;
++ return NULL;
+
+- if (!rdma_nl_types[type].cb_table) {
+- mutex_unlock(&rdma_nl_mutex);
+- request_module("rdma-netlink-subsys-%d", type);
+- mutex_lock(&rdma_nl_mutex);
+- }
++ cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
++ if (!cb_table) {
++ /*
++ * Didn't get valid reference of the table, attempt module
++ * load once.
++ */
++ up_read(&rdma_nl_types[type].sem);
+
+- cb_table = rdma_nl_types[type].cb_table;
++ request_module("rdma-netlink-subsys-%d", type);
+
++ down_read(&rdma_nl_types[type].sem);
++ cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
++ }
+ if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
+- return false;
+- return true;
++ return NULL;
++ return cb_table;
+ }
+
+ void rdma_nl_register(unsigned int index,
+ const struct rdma_nl_cbs cb_table[])
+ {
+- mutex_lock(&rdma_nl_mutex);
+- if (!is_nl_msg_valid(index, 0)) {
+- /*
+- * All clients are not interesting in success/failure of
+- * this call. They want to see the print to error log and
+- * continue their initialization. Print warning for them,
+- * because it is programmer's error to be here.
+- */
+- mutex_unlock(&rdma_nl_mutex);
+- WARN(true,
+- "The not-valid %u index was supplied to RDMA netlink\n",
+- index);
++ if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
++ WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
+ return;
+- }
+
+- if (rdma_nl_types[index].cb_table) {
+- mutex_unlock(&rdma_nl_mutex);
+- WARN(true,
+- "The %u index is already registered in RDMA netlink\n",
+- index);
+- return;
+- }
+-
+- rdma_nl_types[index].cb_table = cb_table;
+- mutex_unlock(&rdma_nl_mutex);
++ /* Pairs with the READ_ONCE in is_nl_valid() */
++ smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
+ }
+ EXPORT_SYMBOL(rdma_nl_register);
+
+ void rdma_nl_unregister(unsigned int index)
+ {
+- mutex_lock(&rdma_nl_mutex);
++ down_write(&rdma_nl_types[index].sem);
+ rdma_nl_types[index].cb_table = NULL;
+- mutex_unlock(&rdma_nl_mutex);
++ up_write(&rdma_nl_types[index].sem);
+ }
+ EXPORT_SYMBOL(rdma_nl_unregister);
+
+@@ -170,15 +156,21 @@ static int rdma_nl_rcv_msg(struct sk_buf
+ unsigned int index = RDMA_NL_GET_CLIENT(type);
+ unsigned int op = RDMA_NL_GET_OP(type);
+ const struct rdma_nl_cbs *cb_table;
++ int err = -EINVAL;
+
+- if (!is_nl_valid(skb, index, op))
++ if (!is_nl_msg_valid(index, op))
+ return -EINVAL;
+
+- cb_table = rdma_nl_types[index].cb_table;
++ down_read(&rdma_nl_types[index].sem);
++ cb_table = get_cb_table(skb, index, op);
++ if (!cb_table)
++ goto done;
+
+ if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
+- !netlink_capable(skb, CAP_NET_ADMIN))
+- return -EPERM;
++ !netlink_capable(skb, CAP_NET_ADMIN)) {
++ err = -EPERM;
++ goto done;
++ }
+
+ /*
+ * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
+@@ -186,8 +178,8 @@ static int rdma_nl_rcv_msg(struct sk_buf
+ */
+ if (index == RDMA_NL_LS) {
+ if (cb_table[op].doit)
+- return cb_table[op].doit(skb, nlh, extack);
+- return -EINVAL;
++ err = cb_table[op].doit(skb, nlh, extack);
++ goto done;
+ }
+ /* FIXME: Convert IWCM to properly handle doit callbacks */
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
+@@ -195,14 +187,15 @@ static int rdma_nl_rcv_msg(struct sk_buf
+ .dump = cb_table[op].dump,
+ };
+ if (c.dump)
+- return netlink_dump_start(skb->sk, skb, nlh, &c);
+- return -EINVAL;
++ err = netlink_dump_start(skb->sk, skb, nlh, &c);
++ goto done;
+ }
+
+ if (cb_table[op].doit)
+- return cb_table[op].doit(skb, nlh, extack);
+-
+- return 0;
++ err = cb_table[op].doit(skb, nlh, extack);
++done:
++ up_read(&rdma_nl_types[index].sem);
++ return err;
+ }
+
+ /*
+@@ -263,9 +256,7 @@ skip:
+
+ static void rdma_nl_rcv(struct sk_buff *skb)
+ {
+- mutex_lock(&rdma_nl_mutex);
+ rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
+- mutex_unlock(&rdma_nl_mutex);
+ }
+
+ int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
+@@ -297,6 +288,14 @@ int rdma_nl_multicast(struct net *net, s
+ }
+ EXPORT_SYMBOL(rdma_nl_multicast);
+
++void rdma_nl_init(void)
++{
++ int idx;
++
++ for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
++ init_rwsem(&rdma_nl_types[idx].sem);
++}
++
+ void rdma_nl_exit(void)
+ {
+ int idx;
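
The scheme in the patch above is a reusable pattern: publish a callback table with a release store, guard readers with a per-index reader/writer lock, and drop the read lock around any operation (here request_module()) that may re-enter registration. Below is a minimal userspace analogue using pthreads and C11 atomics; the names (nl_client, nl_dispatch, the toy string table) are illustrative stand-ins for the pattern, not the ib_core API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NL_NUM_CLIENTS 4

struct nl_client {
	pthread_rwlock_t sem;              /* like rdma_nl_types[i].sem */
	_Atomic(const char **) cb_table;   /* published callback table */
};

static struct nl_client clients[NL_NUM_CLIENTS];

static void nl_init(void)                 /* like rdma_nl_init() */
{
	for (int i = 0; i < NL_NUM_CLIENTS; i++)
		pthread_rwlock_init(&clients[i].sem, NULL);
}

static void nl_register(int idx, const char **table) /* like rdma_nl_register() */
{
	/* Release-publish: pairs with the acquire load in nl_dispatch(). */
	atomic_store_explicit(&clients[idx].cb_table, table,
			      memory_order_release);
}

static void nl_unregister(int idx)        /* like rdma_nl_unregister() */
{
	/* The writer lock waits for in-flight dispatches to drain. */
	pthread_rwlock_wrlock(&clients[idx].sem);
	atomic_store_explicit(&clients[idx].cb_table, NULL,
			      memory_order_relaxed);
	pthread_rwlock_unlock(&clients[idx].sem);
}

static void nl_dispatch(int idx, int op)  /* like rdma_nl_rcv_msg() */
{
	pthread_rwlock_rdlock(&clients[idx].sem);
	const char **tbl = atomic_load_explicit(&clients[idx].cb_table,
						memory_order_acquire);
	if (!tbl) {
		/* Drop the read lock before "loading the module", exactly
		 * as get_cb_table() drops it around request_module(). */
		pthread_rwlock_unlock(&clients[idx].sem);
		/* ... trigger registration here ... */
		pthread_rwlock_rdlock(&clients[idx].sem);
		tbl = atomic_load_explicit(&clients[idx].cb_table,
					   memory_order_acquire);
	}
	if (tbl)
		printf("op %d -> %s\n", op, tbl[op]);
	pthread_rwlock_unlock(&clients[idx].sem);
}

int main(void)
{
	static const char *tbl[] = { "doit", "dump" };

	nl_init();
	nl_register(0, tbl);
	nl_dispatch(0, 1);
	nl_unregister(0);
	return 0;
}
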
diff --git a/patches.suse/IB-core-Use-rdma_read_gid_l2_fields-to-compare-GID-L.patch b/patches.suse/IB-core-Use-rdma_read_gid_l2_fields-to-compare-GID-L.patch
new file mode 100644
index 0000000000..17470b8944
--- /dev/null
+++ b/patches.suse/IB-core-Use-rdma_read_gid_l2_fields-to-compare-GID-L.patch
@@ -0,0 +1,52 @@
+From: Parav Pandit <parav@mellanox.com>
+Date: Wed, 2 Oct 2019 15:17:50 +0300
+Subject: IB/core: Use rdma_read_gid_l2_fields to compare GID L2 fields
+Patch-mainline: v5.4-rc6
+Git-commit: 777a8b32bc0f9bb25848a025f72a9febc30d9033
+References: jsc#SLE-8449
+
+Current code tries to derive VLAN ID and compares it with GID
+attribute for matching entry. This raw search fails on macvlan
+netdevice as its not a VLAN device, but its an upper device of a VLAN
+netdevice.
+
+Due to this limitation, incoming QP1 packets fail to match in the
+GID table. Such packets are dropped.
+
+Hence, to support it, use the existing rdma_read_gid_l2_fields()
+that takes care of different device types.
+
+Fixes: dbf727de7440 ("IB/core: Use GID table in AH creation and dmac resolution")
+Signed-off-by: Parav Pandit <parav@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Link: https://lore.kernel.org/r/20191002121750.17313-1-leon@kernel.org
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/core/verbs.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -662,16 +662,17 @@ static bool find_gid_index(const union i
+ void *context)
+ {
+ struct find_gid_index_context *ctx = context;
++ u16 vlan_id = 0xffff;
++ int ret;
+
+ if (ctx->gid_type != gid_attr->gid_type)
+ return false;
+
+- if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
+- (is_vlan_dev(gid_attr->ndev) &&
+- vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
++ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
++ if (ret)
+ return false;
+
+- return true;
++ return ctx->vlan_id == vlan_id;
+ }
+
+ static const struct ib_gid_attr *
diff --git a/patches.suse/IB-hfi1-Avoid-excessive-retry-for-TID-RDMA-READ-requ.patch b/patches.suse/IB-hfi1-Avoid-excessive-retry-for-TID-RDMA-READ-requ.patch
new file mode 100644
index 0000000000..321eb5ba9c
--- /dev/null
+++ b/patches.suse/IB-hfi1-Avoid-excessive-retry-for-TID-RDMA-READ-requ.patch
@@ -0,0 +1,58 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Fri, 4 Oct 2019 16:40:35 -0400
+Subject: IB/hfi1: Avoid excessive retry for TID RDMA READ request
+Patch-mainline: v5.4-rc6
+Git-commit: 9ed5bd7d22241ad232fd3a5be404e83eb6cadc04
+References: jsc#SLE-8449
+
+A TID RDMA READ request could be retried under one of the following
+conditions:
+- The RC retry timer expires;
+- A later TID RDMA READ RESP packet is received before the next
+ expected one.
+For the latter, under normal conditions, the PSN in IB space is used
+for comparison. More specifically, the IB PSN in the incoming TID RDMA
+READ RESP packet is compared with the last IB PSN of a given TID RDMA
+READ request to determine if the request should be retried. This is
+similar to the retry logic for normal RDMA READ requests.
+
+However, if a TID RDMA READ RESP packet is lost due to congestion,
+header suppression will be disabled and each incoming packet will raise
+an interrupt until the hardware flow is reloaded. Under this condition,
+each packet KDETH PSN will be checked by software against r_next_psn
+and a retry will be requested if the packet KDETH PSN is later than
+r_next_psn. Since each TID RDMA READ segment could have up to 64
+packets and each TID RDMA READ request could have many segments, we
+could make far more retries under such conditions, thus leading to
+RETRY_EXC_ERR status.
+
+This patch fixes the issue by removing the retry when the incoming
+packet KDETH PSN is later than r_next_psn. Instead, it resorts to
+RC timer and normal IB PSN comparison for any request retry.
+
+Fixes: 9905bf06e890 ("IB/hfi1: Add functions to receive TID RDMA READ response")
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Link: https://lore.kernel.org/r/20191004204035.26542.41684.stgit@awfm-01.aw.intel.com
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/tid_rdma.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
+@@ -2736,11 +2736,6 @@ static bool handle_read_kdeth_eflags(str
+ diff = cmp_psn(psn,
+ flow->flow_state.r_next_psn);
+ if (diff > 0) {
+- if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
+- restart_tid_rdma_read_req(rcd,
+- qp,
+- wqe);
+-
+ /* Drop the packet.*/
+ goto s_unlock;
+ } else if (diff < 0) {
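
The "later than" tests described in the log above (cmp_psn() on IB and KDETH PSNs) are serial-number comparisons over a wrapping space. The standalone sketch below illustrates the idea for a 24-bit PSN; it mirrors the spirit of the kernel helper rather than its exact definition, and cmp_psn_24 is a hypothetical name.

#include <stdint.h>
#include <stdio.h>

/* Compare two 24-bit PSNs with wraparound: negative when a precedes b,
 * zero when equal, positive when a is later. Shifting the 24-bit
 * difference up to the sign bit and back yields signed modular order.
 * (Right-shifting a negative value is implementation-defined in C but
 * arithmetic on all mainstream compilers, as such kernel helpers assume.) */
static int cmp_psn_24(uint32_t a, uint32_t b)
{
	return ((int32_t)((a - b) << 8)) >> 8;
}

int main(void)
{
	/* 0x000001 is "later" than 0xffffff despite the numeric wrap. */
	printf("%d\n", cmp_psn_24(0x000001, 0xffffff) > 0); /* prints 1 */
	printf("%d\n", cmp_psn_24(0x000010, 0x000020) < 0); /* prints 1 */
	return 0;
}
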
diff --git a/patches.suse/IB-hfi1-Use-a-common-pad-buffer-for-9B-and-16B-packe.patch b/patches.suse/IB-hfi1-Use-a-common-pad-buffer-for-9B-and-16B-packe.patch
new file mode 100644
index 0000000000..de1e74c273
--- /dev/null
+++ b/patches.suse/IB-hfi1-Use-a-common-pad-buffer-for-9B-and-16B-packe.patch
@@ -0,0 +1,88 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Fri, 4 Oct 2019 16:49:34 -0400
+Subject: IB/hfi1: Use a common pad buffer for 9B and 16B packets
+Patch-mainline: v5.4-rc6
+Git-commit: 22bb13653410424d9fce8d447506a41f8292f22f
+References: jsc#SLE-8449
+
+There is no reason for a different pad buffer for the two
+packet types.
+
+Expand the current buffer allocation to allow for both
+packet types.
+
+Fixes: f8195f3b14a0 ("IB/hfi1: Eliminate allocation while atomic")
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Kaike Wan <kaike.wan@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Link: https://lore.kernel.org/r/20191004204934.26838.13099.stgit@awfm-01.aw.intel.com
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/sdma.c | 5 +++--
+ drivers/infiniband/hw/hfi1/verbs.c | 10 ++++------
+ 2 files changed, 7 insertions(+), 8 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -65,6 +65,7 @@
+ #define SDMA_DESCQ_CNT 2048
+ #define SDMA_DESC_INTR 64
+ #define INVALID_TAIL 0xffff
++#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
+
+ static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
+ module_param(sdma_descq_cnt, uint, S_IRUGO);
+@@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd,
+ struct sdma_engine *sde;
+
+ if (dd->sdma_pad_dma) {
+- dma_free_coherent(&dd->pcidev->dev, 4,
++ dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
+ (void *)dd->sdma_pad_dma,
+ dd->sdma_pad_phys);
+ dd->sdma_pad_dma = NULL;
+@@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u
+ }
+
+ /* Allocate memory for pad */
+- dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
++ dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
+ &dd->sdma_pad_phys, GFP_KERNEL);
+ if (!dd->sdma_pad_dma) {
+ dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp,
+ /* Length of buffer to create verbs txreq cache name */
+ #define TXREQ_NAME_LEN 24
+
+-/* 16B trailing buffer */
+-static const u8 trail_buf[MAX_16B_PADDING];
+-
+ static uint wss_threshold = 80;
+ module_param(wss_threshold, uint, S_IRUGO);
+ MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
+@@ -820,8 +817,8 @@ static int build_verbs_tx_desc(
+
+ /* add icrc, lt byte, and padding to flit */
+ if (extra_bytes)
+- ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
+- (void *)trail_buf, extra_bytes);
++ ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
++ sde->dd->sdma_pad_phys, extra_bytes);
+
+ bail_txadd:
+ return ret;
+@@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *q
+ }
+ /* add icrc, lt byte, and padding to flit */
+ if (extra_bytes)
+- seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
++ seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
++ extra_bytes);
+
+ seg_pio_copy_end(pbuf);
+ }
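
The sizing trick in this patch is a one-liner: allocate a single pad buffer large enough for the larger of the two packet formats' padding needs. A small sketch of the idea follows, where the value of MAX_16B_PADDING is a hypothetical stand-in for the real driver constant.

#include <stdint.h>
#include <stdio.h>

#define MAX_16B_PADDING 12  /* hypothetical stand-in value */

/* Like the kernel's max_t(size_t, MAX_16B_PADDING, sizeof(u32)):
 * one shared buffer sized for the worst case of both packet types. */
#define SDMA_PAD \
	((size_t)MAX_16B_PADDING > sizeof(uint32_t) ? \
	 (size_t)MAX_16B_PADDING : sizeof(uint32_t))

int main(void)
{
	printf("pad buffer size: %zu bytes\n", SDMA_PAD);
	return 0;
}
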
diff --git a/patches.suse/RDMA-hns-Prevent-memory-leaks-of-eq-buf_list.patch b/patches.suse/RDMA-hns-Prevent-memory-leaks-of-eq-buf_list.patch
new file mode 100644
index 0000000000..cf657e053e
--- /dev/null
+++ b/patches.suse/RDMA-hns-Prevent-memory-leaks-of-eq-buf_list.patch
@@ -0,0 +1,35 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 26 Oct 2019 14:56:35 +0800
+Subject: RDMA/hns: Prevent memory leaks of eq->buf_list
+Patch-mainline: v5.4-rc6
+Git-commit: b681a0529968d2261aa15d7a1e78801b2c06bb07
+References: jsc#SLE-8449
+
+eq->buf_list->buf and eq->buf_list should also be freed when eqe_hop_num
+is set to 0, or there will be memory leaks.
+
+Fixes: a5073d6054f7 ("RDMA/hns: Add eq support of hip08")
+Link: https://lore.kernel.org/r/1572072995-11277-3-git-send-email-liweihang@hisilicon.com
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Weihang Li <liweihang@hisilicon.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -5389,9 +5389,9 @@ static void hns_roce_v2_free_eq(struct h
+ return;
+ }
+
+- if (eq->buf_list)
+- dma_free_coherent(hr_dev->dev, buf_chk_sz,
+- eq->buf_list->buf, eq->buf_list->map);
++ dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
++ eq->buf_list->map);
++ kfree(eq->buf_list);
+ }
+
+ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
diff --git a/patches.suse/RDMA-iw_cxgb4-Avoid-freeing-skb-twice-in-arp-failure.patch b/patches.suse/RDMA-iw_cxgb4-Avoid-freeing-skb-twice-in-arp-failure.patch
new file mode 100644
index 0000000000..0effa9c12f
--- /dev/null
+++ b/patches.suse/RDMA-iw_cxgb4-Avoid-freeing-skb-twice-in-arp-failure.patch
@@ -0,0 +1,39 @@
+From: Potnuri Bharat Teja <bharat@chelsio.com>
+Date: Fri, 25 Oct 2019 18:04:40 +0530
+Subject: RDMA/iw_cxgb4: Avoid freeing skb twice in arp failure case
+Patch-mainline: v5.4-rc6
+Git-commit: d4934f45693651ea15357dd6c7c36be28b6da884
+References: jsc#SLE-8392
+
+_put_ep_safe() and _put_pass_ep_safe() free the skb before it is freed by
+process_work(). fix double free by freeing the skb only in process_work().
+
+Fixes: 1dad0ebeea1c ("iw_cxgb4: Avoid touch after free error in ARP failure handlers")
+Link: https://lore.kernel.org/r/1572006880-5800-1-git-send-email-bharat@chelsio.com
+Signed-off-by: Dakshaja Uppalapati <dakshaja@chelsio.com>
+Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/cm.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -495,7 +495,6 @@ static int _put_ep_safe(struct c4iw_dev
+
+ ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
+ release_ep_resources(ep);
+- kfree_skb(skb);
+ return 0;
+ }
+
+@@ -506,7 +505,6 @@ static int _put_pass_ep_safe(struct c4iw
+ ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
+ c4iw_put_ep(&ep->parent_ep->com);
+ release_ep_resources(ep);
+- kfree_skb(skb);
+ return 0;
+ }
+
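
The fix above settles buffer ownership: exactly one path may free the skb. A minimal reproduction of the bug class in plain C, with hypothetical function names mirroring the two paths:

#include <stdlib.h>

static void put_ep_safe(void *skb)
{
	/* Before the fix this path also freed the buffer:
	 *     free(skb);            <- the double free
	 * After the fix it only releases endpoint resources. */
	(void)skb;
}

static void process_work(void *skb)
{
	free(skb);  /* sole owner: the only place the buffer is freed */
}

int main(void)
{
	void *skb = malloc(64);

	put_ep_safe(skb);
	process_work(skb);  /* single free, no double free */
	return 0;
}
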
diff --git a/patches.suse/RDMA-iwcm-move-iw_rem_ref-calls-out-of-spinlock.patch b/patches.suse/RDMA-iwcm-move-iw_rem_ref-calls-out-of-spinlock.patch
new file mode 100644
index 0000000000..605a7bb8fe
--- /dev/null
+++ b/patches.suse/RDMA-iwcm-move-iw_rem_ref-calls-out-of-spinlock.patch
@@ -0,0 +1,171 @@
+From: Krishnamraju Eraparaju <krishna2@chelsio.com>
+Date: Mon, 7 Oct 2019 15:56:27 +0530
+Subject: RDMA/iwcm: move iw_rem_ref() calls out of spinlock
+Patch-mainline: v5.4-rc6
+Git-commit: 54102dd410b037a4d7984e6a5826fb212c2f8aca
+References: jsc#SLE-8449
+
+Kref release routines usually perform memory release operations;
+hence, they should not be called with spinlocks held.
+One such case is the SIW kref release routine siw_free_qp(), which
+can sleep via vfree() while freeing queue memory.
+
+Hence, all iw_rem_ref() calls in IWCM are moved out of spinlocks.
+
+Fixes: 922a8e9fb2e0 ("RDMA: iWARP Connection Manager.")
+Signed-off-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
+Reviewed-by: Bernard Metzler <bmt@zurich.ibm.com>
+Link: https://lore.kernel.org/r/20191007102627.12568-1-krishna2@chelsio.com
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/core/iwcm.c | 52 ++++++++++++++++++++++-------------------
+ 1 file changed, 29 insertions(+), 23 deletions(-)
+
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -372,6 +372,7 @@ EXPORT_SYMBOL(iw_cm_disconnect);
+ static void destroy_cm_id(struct iw_cm_id *cm_id)
+ {
+ struct iwcm_id_private *cm_id_priv;
++ struct ib_qp *qp;
+ unsigned long flags;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+@@ -389,6 +390,9 @@ static void destroy_cm_id(struct iw_cm_i
+ set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
++ qp = cm_id_priv->qp;
++ cm_id_priv->qp = NULL;
++
+ switch (cm_id_priv->state) {
+ case IW_CM_STATE_LISTEN:
+ cm_id_priv->state = IW_CM_STATE_DESTROYING;
+@@ -401,7 +405,7 @@ static void destroy_cm_id(struct iw_cm_i
+ cm_id_priv->state = IW_CM_STATE_DESTROYING;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ /* Abrupt close of the connection */
+- (void)iwcm_modify_qp_err(cm_id_priv->qp);
++ (void)iwcm_modify_qp_err(qp);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ break;
+ case IW_CM_STATE_IDLE:
+@@ -426,11 +430,9 @@ static void destroy_cm_id(struct iw_cm_i
+ BUG();
+ break;
+ }
+- if (cm_id_priv->qp) {
+- cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
+- cm_id_priv->qp = NULL;
+- }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
++ if (qp)
++ cm_id_priv->id.device->ops.iw_rem_ref(qp);
+
+ if (cm_id->mapped) {
+ iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
+@@ -671,11 +673,11 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
+ BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+- if (cm_id_priv->qp) {
+- cm_id->device->ops.iw_rem_ref(qp);
+- cm_id_priv->qp = NULL;
+- }
++ qp = cm_id_priv->qp;
++ cm_id_priv->qp = NULL;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
++ if (qp)
++ cm_id->device->ops.iw_rem_ref(qp);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
+ }
+@@ -696,7 +698,7 @@ int iw_cm_connect(struct iw_cm_id *cm_id
+ struct iwcm_id_private *cm_id_priv;
+ int ret;
+ unsigned long flags;
+- struct ib_qp *qp;
++ struct ib_qp *qp = NULL;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+@@ -730,13 +732,13 @@ int iw_cm_connect(struct iw_cm_id *cm_id
+ return 0; /* success */
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+- if (cm_id_priv->qp) {
+- cm_id->device->ops.iw_rem_ref(qp);
+- cm_id_priv->qp = NULL;
+- }
++ qp = cm_id_priv->qp;
++ cm_id_priv->qp = NULL;
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+ err:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
++ if (qp)
++ cm_id->device->ops.iw_rem_ref(qp);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
+ return ret;
+@@ -878,6 +880,7 @@ static int cm_conn_est_handler(struct iw
+ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+ {
++ struct ib_qp *qp = NULL;
+ unsigned long flags;
+ int ret;
+
+@@ -896,11 +899,13 @@ static int cm_conn_rep_handler(struct iw
+ cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
+ } else {
+ /* REJECTED or RESET */
+- cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
++ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
++ if (qp)
++ cm_id_priv->id.device->ops.iw_rem_ref(qp);
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+
+ if (iw_event->private_data_len)
+@@ -942,21 +947,18 @@ static void cm_disconnect_handler(struct
+ static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+ {
++ struct ib_qp *qp;
+ unsigned long flags;
+- int ret = 0;
++ int ret = 0, notify_event = 0;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
++ qp = cm_id_priv->qp;
++ cm_id_priv->qp = NULL;
+
+- if (cm_id_priv->qp) {
+- cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
+- cm_id_priv->qp = NULL;
+- }
+ switch (cm_id_priv->state) {
+ case IW_CM_STATE_ESTABLISHED:
+ case IW_CM_STATE_CLOSING:
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+- spin_lock_irqsave(&cm_id_priv->lock, flags);
++ notify_event = 1;
+ break;
+ case IW_CM_STATE_DESTROYING:
+ break;
+@@ -965,6 +967,10 @@ static int cm_close_handler(struct iwcm_
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
++ if (qp)
++ cm_id_priv->id.device->ops.iw_rem_ref(qp);
++ if (notify_event)
++ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+ return ret;
+ }
+
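
The transformation above is a general rule: a kref release routine may sleep (here siw_free_qp() via vfree()), so the last reference must never be dropped under a spinlock. Instead, detach the pointer while the lock is held and drop the reference after unlocking. A minimal sketch, using a pthread mutex in place of the spinlock and a toy non-atomic refcount; obj_put() and the struct names are illustrative, not the iwcm API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;          /* toy kref; real code uses kref/atomics */
};

static void obj_put(struct obj *o)
{
	/* May free (and, in the kernel case, may sleep like vfree()),
	 * so it must never run under a spinlock. */
	if (--o->refcount == 0) {
		printf("freeing obj\n");
		free(o);
	}
}

struct cm_id_priv {
	pthread_mutex_t lock;  /* stands in for the iwcm spinlock */
	struct obj *qp;
};

static void close_handler(struct cm_id_priv *p)
{
	struct obj *qp;

	pthread_mutex_lock(&p->lock);
	qp = p->qp;            /* detach under the lock ... */
	p->qp = NULL;
	/* ... update connection state here ... */
	pthread_mutex_unlock(&p->lock);

	if (qp)
		obj_put(qp);   /* ... release only after unlocking */
}

int main(void)
{
	struct cm_id_priv p = { PTHREAD_MUTEX_INITIALIZER, NULL };

	p.qp = calloc(1, sizeof(*p.qp));
	p.qp->refcount = 1;
	close_handler(&p);
	return 0;
}
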
diff --git a/patches.suse/RDMA-mlx5-Clear-old-rate-limit-when-closing-QP.patch b/patches.suse/RDMA-mlx5-Clear-old-rate-limit-when-closing-QP.patch
new file mode 100644
index 0000000000..b042ec9cc4
--- /dev/null
+++ b/patches.suse/RDMA-mlx5-Clear-old-rate-limit-when-closing-QP.patch
@@ -0,0 +1,40 @@
+From: Rafi Wiener <rafiw@mellanox.com>
+Date: Wed, 2 Oct 2019 15:02:43 +0300
+Subject: RDMA/mlx5: Clear old rate limit when closing QP
+Patch-mainline: v5.4-rc6
+Git-commit: c8973df2da677f375f8b12b6eefca2f44c8884d5
+References: jsc#SLE-8446
+
+Before a QP is closed it changes to the ERROR state; when this
+happened, the QP was left with an old rate limit that had already
+been removed from the table.
+
+Fixes: 7d29f349a4b9 ("IB/mlx5: Properly adjust rate limit on QP state transitions")
+Signed-off-by: Rafi Wiener <rafiw@mellanox.com>
+Signed-off-by: Oleg Kuporosov <olegk@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Link: https://lore.kernel.org/r/20191002120243.16971-1-leon@kernel.org
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/mlx5/qp.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3249,10 +3249,12 @@ static int modify_raw_packet_qp_sq(
+ }
+
+ /* Only remove the old rate after new rate was set */
+- if ((old_rl.rate &&
+- !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
+- (new_state != MLX5_SQC_STATE_RDY))
++ if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
++ (new_state != MLX5_SQC_STATE_RDY)) {
+ mlx5_rl_remove_rate(dev, &old_rl);
++ if (new_state != MLX5_SQC_STATE_RDY)
++ memset(&new_rl, 0, sizeof(new_rl));
++ }
+
+ ibqp->rl = new_rl;
+ sq->state = new_state;
diff --git a/patches.suse/RDMA-mlx5-Use-irq-xarray-locking-for-mkey_table.patch b/patches.suse/RDMA-mlx5-Use-irq-xarray-locking-for-mkey_table.patch
new file mode 100644
index 0000000000..d63074bfbe
--- /dev/null
+++ b/patches.suse/RDMA-mlx5-Use-irq-xarray-locking-for-mkey_table.patch
@@ -0,0 +1,86 @@
+From: Jason Gunthorpe <jgg@mellanox.com>
+Date: Thu, 24 Oct 2019 23:49:13 +0000
+Subject: RDMA/mlx5: Use irq xarray locking for mkey_table
+Patch-mainline: v5.4-rc6
+Git-commit: 1524b12a6e02a85264af4ed208b034a2239ef374
+References: jsc#SLE-8446
+
+The mkey_table xarray is touched by the reg_mr_callback() function which
+is called from a hard irq. Thus all other uses of xa_lock must use the
+_irq variants.
+
+ WARNING: inconsistent lock state
+ 5.4.0-rc1 #12 Not tainted
+ --------------------------------
+ inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage.
+ python3/343 [HC0[0]:SC0[0]:HE1:SE1] takes:
+ ffff888182be1d40 (&(&xa->xa_lock)->rlock#3){?.-.}, at: xa_erase+0x12/0x30
+ {IN-HARDIRQ-W} state was registered at:
+ lock_acquire+0xe1/0x200
+ _raw_spin_lock_irqsave+0x35/0x50
+ reg_mr_callback+0x2dd/0x450 [mlx5_ib]
+ mlx5_cmd_exec_cb_handler+0x2c/0x70 [mlx5_core]
+ mlx5_cmd_comp_handler+0x355/0x840 [mlx5_core]
+ [..]
+
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(&(&xa->xa_lock)->rlock#3);
+ <Interrupt>
+ lock(&(&xa->xa_lock)->rlock#3);
+
+ *** DEADLOCK ***
+
+ 2 locks held by python3/343:
+ #0: ffff88818eb4bd38 (&uverbs_dev->disassociate_srcu){....}, at: ib_uverbs_ioctl+0xe5/0x1e0 [ib_uverbs]
+ #1: ffff888176c76d38 (&file->hw_destroy_rwsem){++++}, at: uobj_destroy+0x2d/0x90 [ib_uverbs]
+
+ stack backtrace:
+ CPU: 3 PID: 343 Comm: python3 Not tainted 5.4.0-rc1 #12
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014
+ Call Trace:
+ dump_stack+0x86/0xca
+ print_usage_bug.cold.50+0x2e5/0x355
+ mark_lock+0x871/0xb50
+ ? match_held_lock+0x20/0x250
+ ? check_usage_forwards+0x240/0x240
+ __lock_acquire+0x7de/0x23a0
+ ? __kasan_check_read+0x11/0x20
+ ? mark_lock+0xae/0xb50
+ ? mark_held_locks+0xb0/0xb0
+ ? find_held_lock+0xca/0xf0
+ lock_acquire+0xe1/0x200
+ ? xa_erase+0x12/0x30
+ _raw_spin_lock+0x2a/0x40
+ ? xa_erase+0x12/0x30
+ xa_erase+0x12/0x30
+ mlx5_ib_dealloc_mw+0x55/0xa0 [mlx5_ib]
+ uverbs_dealloc_mw+0x3c/0x70 [ib_uverbs]
+ uverbs_free_mw+0x1a/0x20 [ib_uverbs]
+ destroy_hw_idr_uobject+0x49/0xa0 [ib_uverbs]
+ [..]
+
+Fixes: 0417791536ae ("RDMA/mlx5: Add missing synchronize_srcu() for MW cases")
+Link: https://lore.kernel.org/r/20191024234910.GA9038@ziepe.ca
+Acked-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/mlx5/mr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1967,8 +1967,8 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
+ int err;
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+- xa_erase(&dev->mdev->priv.mkey_table,
+- mlx5_base_mkey(mmw->mmkey.key));
++ xa_erase_irq(&dev->mdev->priv.mkey_table,
++ mlx5_base_mkey(mmw->mmkey.key));
+ /*
+ * pagefault_single_data_segment() may be accessing mmw under
+ * SRCU if the user bound an ODP MR to this MW.
diff --git a/patches.suse/RDMA-nldev-Skip-counter-if-port-doesn-t-match.patch b/patches.suse/RDMA-nldev-Skip-counter-if-port-doesn-t-match.patch
new file mode 100644
index 0000000000..e628cc493f
--- /dev/null
+++ b/patches.suse/RDMA-nldev-Skip-counter-if-port-doesn-t-match.patch
@@ -0,0 +1,35 @@
+From: Mark Zhang <markz@mellanox.com>
+Date: Sun, 20 Oct 2019 09:28:00 +0300
+Subject: RDMA/nldev: Skip counter if port doesn't match
+Patch-mainline: v5.4-rc6
+Git-commit: a15542bb72a48042f5df7475893d46f725f5f9fb
+References: jsc#SLE-8449
+
+The counter resource should return -EAGAIN if it was requested for a
+different port, this is similar to how QP works if the users provides a
+port filter.
+
+Otherwise port filtering in netlink will return broken counter nests.
+
+Fixes: c4ffee7c9bdb ("RDMA/netlink: Implement counter dumpit calback")
+Link: https://lore.kernel.org/r/20191020062800.8065-1-leon@kernel.org
+Signed-off-by: Mark Zhang <markz@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/core/nldev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -778,7 +778,7 @@ static int fill_res_counter_entry(struct
+ container_of(res, struct rdma_counter, res);
+
+ if (port && port != counter->port)
+- return 0;
++ return -EAGAIN;
+
+ /* Dump it even query failed */
+ rdma_counter_query_stats(counter);
diff --git a/patches.suse/RDMA-qedr-Fix-reported-firmware-version.patch b/patches.suse/RDMA-qedr-Fix-reported-firmware-version.patch
new file mode 100644
index 0000000000..bac8178b19
--- /dev/null
+++ b/patches.suse/RDMA-qedr-Fix-reported-firmware-version.patch
@@ -0,0 +1,40 @@
+From: Kamal Heib <kamalheib1@gmail.com>
+Date: Tue, 8 Oct 2019 00:07:30 +0300
+Subject: RDMA/qedr: Fix reported firmware version
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.4-rc6
+Git-commit: b806c94ee44e53233b8ce6c92d9078d9781786a5
+References: jsc#SLE-8215
+
+Remove spaces from the reported firmware version string.
+Actual value:
+$ cat /sys/class/infiniband/qedr0/fw_ver
+8. 37. 7. 0
+
+Expected value:
+$ cat /sys/class/infiniband/qedr0/fw_ver
+8.37.7.0
+
+Fixes: ec72fce401c6 ("qedr: Add support for RoCE HW init")
+Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
+Acked-by: Michal Kalderon <michal.kalderon@marvell.com>
+Link: https://lore.kernel.org/r/20191007210730.7173-1-kamalheib1@gmail.com
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/qedr/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -76,7 +76,7 @@ static void qedr_get_dev_fw_str(struct i
+ struct qedr_dev *qedr = get_qedr_dev(ibdev);
+ u32 fw_ver = (u32)qedr->attr.fw_ver;
+
+- snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
++ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
+ (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
+ (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
+ }
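
The entire fix is the format string; the stray spaces are easy to see in a standalone snippet that reproduces the reported sysfs output:

#include <stdio.h>

int main(void)
{
	char str[32];
	unsigned int fw_ver = (8u << 24) | (37u << 16) | (7u << 8) | 0u;

	/* Old format string: a stray space after every dot. */
	snprintf(str, sizeof(str), "%d. %d. %d. %d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
	printf("before: %s\n", str);   /* 8. 37. 7. 0 */

	/* Fixed format string, as in the patched qedr_get_dev_fw_str(). */
	snprintf(str, sizeof(str), "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
	printf("after:  %s\n", str);   /* 8.37.7.0 */
	return 0;
}
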
diff --git a/patches.suse/RDMA-siw-free-siw_base_qp-in-kref-release-routine.patch b/patches.suse/RDMA-siw-free-siw_base_qp-in-kref-release-routine.patch
new file mode 100644
index 0000000000..13a39a069c
--- /dev/null
+++ b/patches.suse/RDMA-siw-free-siw_base_qp-in-kref-release-routine.patch
@@ -0,0 +1,56 @@
+From: Krishnamraju Eraparaju <krishna2@chelsio.com>
+Date: Mon, 7 Oct 2019 16:12:29 +0530
+Subject: RDMA/siw: free siw_base_qp in kref release routine
+Patch-mainline: v5.4-rc6
+Git-commit: e17fa5c95ef2434a08e0be217969d246d037f0c2
+References: jsc#SLE-8381
+
+As siw_free_qp() is the last routine to access the 'siw_base_qp'
+structure, freeing this structure early in siw_destroy_qp() could
+cause a touch-after-free issue.
+Hence, move kfree(siw_base_qp) from siw_destroy_qp() to siw_free_qp().
+
+Fixes: 303ae1cdfdf7 ("rdma/siw: application interface")
+Signed-off-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
+Link: https://lore.kernel.org/r/20191007104229.29412-1-krishna2@chelsio.com
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/siw/siw_qp.c | 2 ++
+ drivers/infiniband/sw/siw/siw_verbs.c | 2 --
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/sw/siw/siw_qp.c
++++ b/drivers/infiniband/sw/siw/siw_qp.c
+@@ -1312,6 +1312,7 @@ int siw_qp_add(struct siw_device *sdev,
+ void siw_free_qp(struct kref *ref)
+ {
+ struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
++ struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
+ struct siw_device *sdev = qp->sdev;
+ unsigned long flags;
+
+@@ -1334,4 +1335,5 @@ void siw_free_qp(struct kref *ref)
+ atomic_dec(&sdev->num_qp);
+ siw_dbg_qp(qp, "free QP\n");
+ kfree_rcu(qp, rcu);
++ kfree(siw_base_qp);
+ }
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -604,7 +604,6 @@ out:
+ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
+ {
+ struct siw_qp *qp = to_siw_qp(base_qp);
+- struct siw_base_qp *siw_base_qp = to_siw_base_qp(base_qp);
+ struct siw_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct siw_ucontext,
+ base_ucontext);
+@@ -641,7 +640,6 @@ int siw_destroy_qp(struct ib_qp *base_qp
+ qp->scq = qp->rcq = NULL;
+
+ siw_qp_put(qp);
+- kfree(siw_base_qp);
+
+ return 0;
+ }
diff --git a/patches.suse/RDMA-uverbs-Prevent-potential-underflow.patch b/patches.suse/RDMA-uverbs-Prevent-potential-underflow.patch
new file mode 100644
index 0000000000..0570f8ac36
--- /dev/null
+++ b/patches.suse/RDMA-uverbs-Prevent-potential-underflow.patch
@@ -0,0 +1,52 @@
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 11 Oct 2019 16:34:19 +0300
+Subject: RDMA/uverbs: Prevent potential underflow
+Patch-mainline: v5.4-rc6
+Git-commit: a9018adfde809d44e71189b984fa61cc89682b5e
+References: jsc#SLE-8449
+
+The issue is in drivers/infiniband/core/uverbs_std_types_cq.c in the
+UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE) function. We check that:
+
+ if (attr.comp_vector >= attrs->ufile->device->num_comp_vectors) {
+
+But we don't check if "attr.comp_vector" is negative. It could
+potentially lead to an array underflow. My concern would be where
+cq->vector is used in the create_cq() function from the cxgb4 driver.
+
+And really "attr.comp_vector" appears as a u32 to user space, so that's
+the right type to use.
+
+Fixes: 9ee79fce3642 ("IB/core: Add completion queue (cq) object actions")
+Link: https://lore.kernel.org/r/20191011133419.GA22905@mwanda
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/core/uverbs.h | 2 +-
+ include/rdma/ib_verbs.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/core/uverbs.h
++++ b/drivers/infiniband/core/uverbs.h
+@@ -98,7 +98,7 @@ ib_uverbs_init_udata_buf_or_null(struct
+
+ struct ib_uverbs_device {
+ atomic_t refcount;
+- int num_comp_vectors;
++ u32 num_comp_vectors;
+ struct completion comp;
+ struct device dev;
+ /* First group for device attributes, NULL terminated array */
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -366,7 +366,7 @@ struct ib_tm_caps {
+
+ struct ib_cq_init_attr {
+ unsigned int cqe;
+- int comp_vector;
++ u32 comp_vector;
+ u32 flags;
+ };
+
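
The signed-versus-unsigned pitfall described above reproduces in a few lines of standalone C: with int, -1 slips past the upper-bound check (and would index the vector array negatively), while the same bit pattern declared as u32 compares as a huge value and is rejected. NUM_COMP_VECTORS and the check_* helpers are hypothetical names for this sketch.

#include <stdint.h>
#include <stdio.h>

#define NUM_COMP_VECTORS 8

static int check_signed(int comp_vector)
{
	/* -1 >= 8 is false, so a negative index passes the check
	 * and would later underflow the array. */
	return comp_vector >= NUM_COMP_VECTORS ? -1 : 0;
}

static int check_unsigned(uint32_t comp_vector)
{
	/* The same bit pattern (0xffffffff) now compares as a huge
	 * positive value and is correctly rejected. */
	return comp_vector >= NUM_COMP_VECTORS ? -1 : 0;
}

int main(void)
{
	printf("signed   check on -1: %d (accepted!)\n", check_signed(-1));
	printf("unsigned check on -1: %d (rejected)\n",
	       check_unsigned((uint32_t)-1));
	return 0;
}
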
diff --git a/patches.suse/SUNRPC-Destroy-the-back-channel-when-we-destroy-the-.patch b/patches.suse/SUNRPC-Destroy-the-back-channel-when-we-destroy-the-.patch
new file mode 100644
index 0000000000..4d8363fc3f
--- /dev/null
+++ b/patches.suse/SUNRPC-Destroy-the-back-channel-when-we-destroy-the-.patch
@@ -0,0 +1,61 @@
+From: Trond Myklebust <trondmy@gmail.com>
+Date: Thu, 17 Oct 2019 09:02:21 -0400
+Subject: SUNRPC: Destroy the back channel when we destroy the host transport
+Patch-mainline: v5.4-rc6
+Git-commit: 669996add4c92476e0f8d6b4cd2bb308d1939fd7
+References: bsc#1154353
+
+When we're destroying the host transport mechanism, we should ensure
+that we do not leak memory by failing to release any back channel
+slots that might still exist.
+
+Reported-by: Neil Brown <neilb@suse.de>
+Reported-by: kbuild test robot <lkp@intel.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/linux/sunrpc/bc_xprt.h | 5 +++++
+ net/sunrpc/backchannel_rqst.c | 2 +-
+ net/sunrpc/xprt.c | 5 +++++
+ 3 files changed, 11 insertions(+), 1 deletion(-)
+
+--- a/include/linux/sunrpc/bc_xprt.h
++++ b/include/linux/sunrpc/bc_xprt.h
+@@ -64,6 +64,11 @@ static inline int xprt_setup_backchannel
+ return 0;
+ }
+
++static inline void xprt_destroy_backchannel(struct rpc_xprt *xprt,
++ unsigned int max_reqs)
++{
++}
++
+ static inline bool svc_is_backchannel(const struct svc_rqst *rqstp)
+ {
+ return false;
+--- a/net/sunrpc/backchannel_rqst.c
++++ b/net/sunrpc/backchannel_rqst.c
+@@ -220,7 +220,7 @@ void xprt_destroy_bc(struct rpc_xprt *xp
+ goto out;
+
+ spin_lock_bh(&xprt->bc_pa_lock);
+- xprt->bc_alloc_max -= max_reqs;
++ xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
+ list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
+ dprintk("RPC: req=%p\n", req);
+ list_del(&req->rq_bc_pa_list);
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1943,6 +1943,11 @@ static void xprt_destroy_cb(struct work_
+ rpc_destroy_wait_queue(&xprt->backlog);
+ kfree(xprt->servername);
+ /*
++ * Destroy any existing back channel
++ */
++ xprt_destroy_backchannel(xprt, UINT_MAX);
++
++ /*
+ * Tear down transport state and free the rpc_xprt
+ */
+ xprt->ops->destroy(xprt);
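
Note the bc_alloc_max hunk above: since xprt_destroy_cb() now calls xprt_destroy_backchannel(xprt, UINT_MAX), the unsigned subtraction has to be clamped with min() or it would wrap. A standalone illustration of the clamp, with hypothetical helper names:

#include <stdio.h>

static unsigned int bc_alloc_max = 2;

/* Unclamped: passing ~0u (as xprt_destroy_cb() now does) wraps
 * bc_alloc_max around to a huge bogus value. */
static void shrink_unclamped(unsigned int max_reqs)
{
	bc_alloc_max -= max_reqs;
}

/* Clamped, as in the patched xprt_destroy_bc(). */
static void shrink_clamped(unsigned int max_reqs)
{
	bc_alloc_max -= (max_reqs < bc_alloc_max) ? max_reqs : bc_alloc_max;
}

int main(void)
{
	shrink_unclamped(~0u);
	printf("unclamped: %u\n", bc_alloc_max); /* wrapped to 3 */

	bc_alloc_max = 2;
	shrink_clamped(~0u);
	printf("clamped:   %u\n", bc_alloc_max); /* 0 */
	return 0;
}
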
diff --git a/patches.suse/SUNRPC-The-RDMA-back-channel-mustn-t-disappear-while.patch b/patches.suse/SUNRPC-The-RDMA-back-channel-mustn-t-disappear-while.patch
new file mode 100644
index 0000000000..14ef2b1132
--- /dev/null
+++ b/patches.suse/SUNRPC-The-RDMA-back-channel-mustn-t-disappear-while.patch
@@ -0,0 +1,39 @@
+From: Trond Myklebust <trondmy@gmail.com>
+Date: Thu, 17 Oct 2019 09:02:20 -0400
+Subject: SUNRPC: The RDMA back channel mustn't disappear while requests are
+ outstanding
+Patch-mainline: v5.4-rc6
+Git-commit: 9edb455e6797bb50aa38ef71e62668966065ede8
+References: bsc#1154353
+
+If there are RDMA back channel requests being processed by the
+server threads, then we should hold a reference to the transport
+to ensure it doesn't get freed from underneath us.
+
+Reported-by: Neil Brown <neilb@suse.de>
+Fixes: 63cae47005af ("xprtrdma: Handle incoming backward direction RPC calls")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/sunrpc/xprtrdma/backchannel.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/sunrpc/xprtrdma/backchannel.c
++++ b/net/sunrpc/xprtrdma/backchannel.c
+@@ -163,6 +163,7 @@ void xprt_rdma_bc_free_rqst(struct rpc_r
+ spin_lock(&xprt->bc_pa_lock);
+ list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
+ spin_unlock(&xprt->bc_pa_lock);
++ xprt_put(xprt);
+ }
+
+ static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
+@@ -259,6 +260,7 @@ void rpcrdma_bc_receive_call(struct rpcr
+
+ /* Queue rqst for ULP's callback service */
+ bc_serv = xprt->bc_serv;
++ xprt_get(xprt);
+ spin_lock(&bc_serv->sv_cb_lock);
+ list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
+ spin_unlock(&bc_serv->sv_cb_lock);
diff --git a/patches.suse/SUNRPC-The-TCP-back-channel-mustn-t-disappear-while-.patch b/patches.suse/SUNRPC-The-TCP-back-channel-mustn-t-disappear-while-.patch
new file mode 100644
index 0000000000..f82b4ea53e
--- /dev/null
+++ b/patches.suse/SUNRPC-The-TCP-back-channel-mustn-t-disappear-while-.patch
@@ -0,0 +1,50 @@
+From: Trond Myklebust <trondmy@gmail.com>
+Date: Thu, 17 Oct 2019 09:02:19 -0400
+Subject: SUNRPC: The TCP back channel mustn't disappear while requests are
+ outstanding
+Patch-mainline: v5.4-rc6
+Git-commit: 875f0706accd6501c3209bb99df8573171fb5d75
+References: bsc#1154353
+
+If there are TCP back channel requests being processed by the
+server threads, then we should hold a reference to the transport
+to ensure it doesn't get freed from underneath us.
+
+Reported-by: Neil Brown <neilb@suse.de>
+Fixes: 2ea24497a1b3 ("SUNRPC: RPC callbacks may be split across several..")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/sunrpc/backchannel_rqst.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/sunrpc/backchannel_rqst.c
++++ b/net/sunrpc/backchannel_rqst.c
+@@ -307,8 +307,8 @@ void xprt_free_bc_rqst(struct rpc_rqst *
+ */
+ dprintk("RPC: Last session removed req=%p\n", req);
+ xprt_free_allocation(req);
+- return;
+ }
++ xprt_put(xprt);
+ }
+
+ /*
+@@ -339,7 +339,7 @@ found:
+ spin_unlock(&xprt->bc_pa_lock);
+ if (new) {
+ if (req != new)
+- xprt_free_bc_rqst(new);
++ xprt_free_allocation(new);
+ break;
+ } else if (req)
+ break;
+@@ -368,6 +368,7 @@ void xprt_complete_bc_request(struct rpc
+ set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+
+ dprintk("RPC: add callback request to list\n");
++ xprt_get(xprt);
+ spin_lock(&bc_serv->sv_cb_lock);
+ list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ wake_up(&bc_serv->sv_cb_waitq);
diff --git a/patches.suse/bnxt_en-Adjust-the-time-to-wait-before-polling-firmw.patch b/patches.suse/bnxt_en-Adjust-the-time-to-wait-before-polling-firmw.patch
new file mode 100644
index 0000000000..1fa61a8ca7
--- /dev/null
+++ b/patches.suse/bnxt_en-Adjust-the-time-to-wait-before-polling-firmw.patch
@@ -0,0 +1,43 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Mon, 21 Oct 2019 01:34:27 -0400
+Subject: bnxt_en: Adjust the time to wait before polling firmware readiness.
+Patch-mainline: v5.4-rc6
+Git-commit: c6a9e7aa2e8b15402022a15625284069d4fd6df0
+References: jsc#SLE-8371 bsc#1153274
+
+When the firmware indicates that the driver needs to invoke a firmware
+reset, which is common to both the error recovery and live firmware
+reset paths, the driver needs a different time to wait before polling
+for firmware readiness.
+
+Modify the wait time to fw_reset_min_dsecs, which is initialised to
+the correct timeout for both error recovery and firmware reset.
+
+Fixes: 4037eb715680 ("bnxt_en: Add a new BNXT_FW_RESET_STATE_POLL_FW_DOWN state.")
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -10669,14 +10669,11 @@ static void bnxt_fw_reset_task(struct wo
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
+ }
+ /* fall through */
+- case BNXT_FW_RESET_STATE_RESET_FW: {
+- u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
+-
++ case BNXT_FW_RESET_STATE_RESET_FW:
+ bnxt_reset_all(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+- bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
++ bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
+ return;
+- }
+ case BNXT_FW_RESET_STATE_ENABLE_DEV:
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
+ bp->fw_health) {
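Note the unit conversion in the new bnxt_queue_fw_reset_work() call: fw_reset_min_dsecs is in deciseconds and the delayed-work API takes jiffies, hence the * HZ / 10. A hedged sketch of that conversion (values are illustrative):

    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* 1 decisecond = HZ/10 jiffies; e.g. 20 dsecs at HZ=250 gives
     * 20 * 250 / 10 = 500 jiffies, i.e. 2 seconds. */
    static unsigned long dsecs_to_jiffies(u16 dsecs)
    {
        return (unsigned long)dsecs * HZ / 10;
    }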
diff --git a/patches.suse/bnxt_en-Avoid-disabling-pci-device-in-bnxt_remove_on.patch b/patches.suse/bnxt_en-Avoid-disabling-pci-device-in-bnxt_remove_on.patch
new file mode 100644
index 0000000000..22281a07ad
--- /dev/null
+++ b/patches.suse/bnxt_en-Avoid-disabling-pci-device-in-bnxt_remove_on.patch
@@ -0,0 +1,34 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Mon, 21 Oct 2019 01:34:29 -0400
+Subject: bnxt_en: Avoid disabling pci device in bnxt_remove_one() for already
+ disabled device.
+Patch-mainline: v5.4-rc6
+Git-commit: f6824308c4be25ba024ab942a6135aa0356acaea
+References: jsc#SLE-8371 bsc#1153274
+
+With the recently added error recovery logic, the device may already
+be disabled if the firmware recovery is unsuccessful. In
+bnxt_remove_one(), check that the device is still enabled first
+before calling pci_disable_device().
+
+Fixes: 3bc7d4a352ef ("bnxt_en: Add BNXT_STATE_IN_FW_RESET state.")
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -10382,7 +10382,8 @@ static void bnxt_cleanup_pci(struct bnxt
+ {
+ bnxt_unmap_bars(bp, bp->pdev);
+ pci_release_regions(bp->pdev);
+- pci_disable_device(bp->pdev);
++ if (pci_is_enabled(bp->pdev))
++ pci_disable_device(bp->pdev);
+ }
+
+ static void bnxt_init_dflt_coal(struct bnxt *bp)
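The guard makes the cleanup idempotent: pci_disable_device() decrements an enable count, so calling it on a device that the failed recovery path already disabled would unbalance that count. A minimal sketch of the pattern (function name illustrative):

    #include <linux/pci.h>

    static void example_cleanup(struct pci_dev *pdev)
    {
        pci_release_regions(pdev);
        /* skip the disable if an earlier path already did it */
        if (pci_is_enabled(pdev))
            pci_disable_device(pdev);
    }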
diff --git a/patches.suse/bnxt_en-Fix-devlink-NVRAM-related-byte-order-related.patch b/patches.suse/bnxt_en-Fix-devlink-NVRAM-related-byte-order-related.patch
new file mode 100644
index 0000000000..bb2090758e
--- /dev/null
+++ b/patches.suse/bnxt_en-Fix-devlink-NVRAM-related-byte-order-related.patch
@@ -0,0 +1,136 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Mon, 21 Oct 2019 01:34:26 -0400
+Subject: bnxt_en: Fix devlink NVRAM related byte order related issues.
+Patch-mainline: v5.4-rc6
+Git-commit: 83a46a82b96c1928ad82958752523fb0c7d9fcce
+References: jsc#SLE-8371 bsc#1153274
+
+The current code does not do endian swapping between the devlink
+parameter and the internal NVRAM representation. Define a union to
+represent the little endian NVRAM data and add 2 helper functions to
+copy to and from the NVRAM data with the proper byte swapping.
+
+Fixes: 782a624d00fa ("bnxt_en: Add bnxt_en initial port params table and register it")
+Cc: Jiri Pirko <jiri@mellanox.com>
+Reviewed-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 81 ++++++++++++++--------
+ 1 file changed, 54 insertions(+), 27 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -226,12 +226,55 @@ static const struct bnxt_dl_nvm_param nv
+ BNXT_NVM_SHARED_CFG, 1, 1},
+ };
+
++union bnxt_nvm_data {
++ u8 val8;
++ __le32 val32;
++};
++
++static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
++ union devlink_param_value *src,
++ int nvm_num_bits, int dl_num_bytes)
++{
++ u32 val32 = 0;
++
++ if (nvm_num_bits == 1) {
++ dst->val8 = src->vbool;
++ return;
++ }
++ if (dl_num_bytes == 4)
++ val32 = src->vu32;
++ else if (dl_num_bytes == 2)
++ val32 = (u32)src->vu16;
++ else if (dl_num_bytes == 1)
++ val32 = (u32)src->vu8;
++ dst->val32 = cpu_to_le32(val32);
++}
++
++static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
++ union bnxt_nvm_data *src,
++ int nvm_num_bits, int dl_num_bytes)
++{
++ u32 val32;
++
++ if (nvm_num_bits == 1) {
++ dst->vbool = src->val8;
++ return;
++ }
++ val32 = le32_to_cpu(src->val32);
++ if (dl_num_bytes == 4)
++ dst->vu32 = val32;
++ else if (dl_num_bytes == 2)
++ dst->vu16 = (u16)val32;
++ else if (dl_num_bytes == 1)
++ dst->vu8 = (u8)val32;
++}
++
+ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ int msg_len, union devlink_param_value *val)
+ {
+ struct hwrm_nvm_get_variable_input *req = msg;
+- void *data_addr = NULL, *buf = NULL;
+ struct bnxt_dl_nvm_param nvm_param;
++ union bnxt_nvm_data *data;
+ dma_addr_t data_dma_addr;
+ int idx = 0, rc, i;
+
+@@ -254,26 +297,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt
+ else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
+
+- switch (nvm_param.dl_num_bytes) {
+- case 1:
+- if (nvm_param.nvm_num_bits == 1)
+- buf = &val->vbool;
+- else
+- buf = &val->vu8;
+- break;
+- case 2:
+- buf = &val->vu16;
+- break;
+- case 4:
+- buf = &val->vu32;
+- break;
+- default:
+- return -EFAULT;
+- }
+-
+- data_addr = dma_alloc_coherent(&bp->pdev->dev, nvm_param.dl_num_bytes,
+- &data_dma_addr, GFP_KERNEL);
+- if (!data_addr)
++ data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
++ &data_dma_addr, GFP_KERNEL);
++ if (!data)
+ return -ENOMEM;
+
+ req->dest_data_addr = cpu_to_le64(data_dma_addr);
+@@ -284,17 +310,18 @@ static int bnxt_hwrm_nvm_req(struct bnxt
+ req->dimensions = cpu_to_le16(1);
+
+ if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
+- memcpy(data_addr, buf, nvm_param.dl_num_bytes);
++ bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
++ nvm_param.dl_num_bytes);
+ rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ } else {
+ rc = hwrm_send_message_silent(bp, msg, msg_len,
+ HWRM_CMD_TIMEOUT);
++ if (!rc)
++ bnxt_copy_from_nvm_data(val, data,
++ nvm_param.nvm_num_bits,
++ nvm_param.dl_num_bytes);
+ }
+- if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
+- memcpy(buf, data_addr, nvm_param.dl_num_bytes);
+-
+- dma_free_coherent(&bp->pdev->dev, nvm_param.dl_num_bytes, data_addr,
+- data_dma_addr);
++ dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
+ if (rc == -EACCES)
+ netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
+ return rc;
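The two helpers centralize the byte swapping the old memcpy()-based code skipped: the NVRAM side is always little endian, the devlink side is host order. A small userspace analogue of the round trip, assuming glibc's endian.h (illustrative, not driver code):

    #include <assert.h>
    #include <endian.h>
    #include <stdint.h>

    union nvm_data {            /* mirrors the driver's union shape */
        uint8_t  val8;
        uint32_t val32;         /* held in little-endian byte order */
    };

    int main(void)
    {
        union nvm_data d;
        uint32_t host = 0x12345678;

        d.val32 = htole32(host);            /* cpu_to_le32() analogue */
        assert(le32toh(d.val32) == host);   /* le32_to_cpu() analogue */
        return 0;
    }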
diff --git a/patches.suse/bnxt_en-Fix-the-size-of-devlink-MSIX-parameters.patch b/patches.suse/bnxt_en-Fix-the-size-of-devlink-MSIX-parameters.patch
new file mode 100644
index 0000000000..f5f72d3034
--- /dev/null
+++ b/patches.suse/bnxt_en-Fix-the-size-of-devlink-MSIX-parameters.patch
@@ -0,0 +1,121 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Mon, 21 Oct 2019 01:34:25 -0400
+Subject: bnxt_en: Fix the size of devlink MSIX parameters.
+Patch-mainline: v5.4-rc6
+Git-commit: c329230ce886f449a6e559b636096b75ab00d18a
+References: jsc#SLE-8371 bsc#1153274
+
+The current code that rounds up the NVRAM parameter bit size to the next
+byte size for the devlink parameter is not always correct. The MSIX
+devlink parameters are 4 bytes and we don't get the correct size
+using this method.
+
+Fix it by adding a new dl_num_bytes member to the bnxt_dl_nvm_param
+structure which statically provides bytesize information according
+to the devlink parameter type definition.
+
+Fixes: 782a624d00fa ("bnxt_en: Add bnxt_en initial port params table and register it")
+Cc: Jiri Pirko <jiri@mellanox.com>
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 28 +++++++++++-----------
+ drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 3 +-
+ 2 files changed, 16 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -215,15 +215,15 @@ enum bnxt_dl_param_id {
+
+ static const struct bnxt_dl_nvm_param nvm_params[] = {
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
+- BNXT_NVM_SHARED_CFG, 1},
++ BNXT_NVM_SHARED_CFG, 1, 1},
+ {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
+- BNXT_NVM_SHARED_CFG, 1},
++ BNXT_NVM_SHARED_CFG, 1, 1},
+ {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+- NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
++ NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
+ {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+- NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
++ NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
+ {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
+- BNXT_NVM_SHARED_CFG, 1},
++ BNXT_NVM_SHARED_CFG, 1, 1},
+ };
+
+ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+@@ -232,8 +232,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt
+ struct hwrm_nvm_get_variable_input *req = msg;
+ void *data_addr = NULL, *buf = NULL;
+ struct bnxt_dl_nvm_param nvm_param;
+- int bytesize, idx = 0, rc, i;
+ dma_addr_t data_dma_addr;
++ int idx = 0, rc, i;
+
+ /* Get/Set NVM CFG parameter is supported only on PFs */
+ if (BNXT_VF(bp))
+@@ -254,10 +254,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt
+ else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
+
+- bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
+- switch (bytesize) {
++ switch (nvm_param.dl_num_bytes) {
+ case 1:
+- if (nvm_param.num_bits == 1)
++ if (nvm_param.nvm_num_bits == 1)
+ buf = &val->vbool;
+ else
+ buf = &val->vu8;
+@@ -272,29 +271,30 @@ static int bnxt_hwrm_nvm_req(struct bnxt
+ return -EFAULT;
+ }
+
+- data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
++ data_addr = dma_alloc_coherent(&bp->pdev->dev, nvm_param.dl_num_bytes,
+ &data_dma_addr, GFP_KERNEL);
+ if (!data_addr)
+ return -ENOMEM;
+
+ req->dest_data_addr = cpu_to_le64(data_dma_addr);
+- req->data_len = cpu_to_le16(nvm_param.num_bits);
++ req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
+ req->option_num = cpu_to_le16(nvm_param.offset);
+ req->index_0 = cpu_to_le16(idx);
+ if (idx)
+ req->dimensions = cpu_to_le16(1);
+
+ if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
+- memcpy(data_addr, buf, bytesize);
++ memcpy(data_addr, buf, nvm_param.dl_num_bytes);
+ rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ } else {
+ rc = hwrm_send_message_silent(bp, msg, msg_len,
+ HWRM_CMD_TIMEOUT);
+ }
+ if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
+- memcpy(buf, data_addr, bytesize);
++ memcpy(buf, data_addr, nvm_param.dl_num_bytes);
+
+- dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
++ dma_free_coherent(&bp->pdev->dev, nvm_param.dl_num_bytes, data_addr,
++ data_dma_addr);
+ if (rc == -EACCES)
+ netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
+ return rc;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+@@ -52,7 +52,8 @@ struct bnxt_dl_nvm_param {
+ u16 id;
+ u16 offset;
+ u16 dir_type;
+- u16 num_bits;
++ u16 nvm_num_bits;
++ u8 dl_num_bytes;
+ };
+
+ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
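The bug is easiest to see with the MSIX parameters: they occupy 10 (max) and 7 (min) bits in NVRAM, so rounding up to whole bytes yields 2 and 1, yet devlink declares both as u32 and copies 4 bytes. A standalone C illustration of the mismatch (values taken from the table above):

    #include <stdio.h>

    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
        int nvm_num_bits = 10;                        /* MSIX max vectors per PF */
        int old_bytes = ROUNDUP(nvm_num_bits, 8) / 8; /* = 2 */
        int dl_bytes = 4;                             /* devlink u32 parameter */

        /* prints "old=2 devlink=4": two of four bytes were never copied */
        printf("old=%d devlink=%d\n", old_bytes, dl_bytes);
        return 0;
    }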
diff --git a/patches.suse/bnxt_en-Minor-formatting-changes-in-FW-devlink_healt.patch b/patches.suse/bnxt_en-Minor-formatting-changes-in-FW-devlink_healt.patch
new file mode 100644
index 0000000000..6d9253036b
--- /dev/null
+++ b/patches.suse/bnxt_en-Minor-formatting-changes-in-FW-devlink_healt.patch
@@ -0,0 +1,54 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Mon, 21 Oct 2019 01:34:28 -0400
+Subject: bnxt_en: Minor formatting changes in FW devlink_health_reporter
+Patch-mainline: v5.4-rc6
+Git-commit: f255ed1c4e4c5ed8171b6e81dce1297df1f1b60c
+References: jsc#SLE-8371 bsc#1153274
+
+Minor formatting changes to the diagnose callback of the FW devlink
+health reporter.
+
+Suggested-by: Jiri Pirko <jiri@mellanox.com>
+Cc: Jiri Pirko <jiri@mellanox.com>
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 17 ++++++-----------
+ 1 file changed, 6 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -29,25 +29,20 @@ static int bnxt_fw_reporter_diagnose(str
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ health_status = val & 0xffff;
+
+- if (health_status == BNXT_FW_STATUS_HEALTHY) {
+- rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+- "Healthy;");
+- if (rc)
+- return rc;
+- } else if (health_status < BNXT_FW_STATUS_HEALTHY) {
+- rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+- "Not yet completed initialization;");
++ if (health_status < BNXT_FW_STATUS_HEALTHY) {
++ rc = devlink_fmsg_string_pair_put(fmsg, "Description",
++ "Not yet completed initialization");
+ if (rc)
+ return rc;
+ } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
+- rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+- "Encountered fatal error and cannot recover;");
++ rc = devlink_fmsg_string_pair_put(fmsg, "Description",
++ "Encountered fatal error and cannot recover");
+ if (rc)
+ return rc;
+ }
+
+ if (val >> 16) {
+- rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
++ rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
+ if (rc)
+ return rc;
+ }
diff --git a/patches.suse/bonding-fix-unexpected-IFF_BONDING-bit-unset.patch b/patches.suse/bonding-fix-unexpected-IFF_BONDING-bit-unset.patch
new file mode 100644
index 0000000000..ad944e4934
--- /dev/null
+++ b/patches.suse/bonding-fix-unexpected-IFF_BONDING-bit-unset.patch
@@ -0,0 +1,93 @@
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Mon, 21 Oct 2019 18:47:52 +0000
+Subject: bonding: fix unexpected IFF_BONDING bit unset
+Patch-mainline: v5.4-rc6
+Git-commit: 65de65d9033750d2cf1b336c9d6e9da3a8b5cc6e
+References: bsc#1154353
+
+The IFF_BONDING flag marks a bonding master or bonding slave device.
+->ndo_add_slave() sets the IFF_BONDING flag and ->ndo_del_slave()
+unsets it.
+
+bond0<--bond1
+
+Both bond0 and bond1 are bonding devices, and both should keep the
+IFF_BONDING flag until they are removed.
+But bond1 would lose IFF_BONDING at ->ndo_del_slave() because that routine
+does not check whether the slave device is itself a bonding device.
+This patch adds an interface type check before removing the
+IFF_BONDING flag.
+
+Test commands:
+ ip link add bond0 type bond
+ ip link add bond1 type bond
+ ip link set bond1 master bond0
+ ip link set bond1 nomaster
+ ip link del bond1 type bond
+ ip link add bond1 type bond
+
+Splat looks like:
+[ 226.665555] proc_dir_entry 'bonding/bond1' already registered
+[ 226.666440] WARNING: CPU: 0 PID: 737 at fs/proc/generic.c:361 proc_register+0x2a9/0x3e0
+[ 226.667571] Modules linked in: bonding af_packet sch_fq_codel ip_tables x_tables unix
+[ 226.668662] CPU: 0 PID: 737 Comm: ip Not tainted 5.4.0-rc3+ #96
+[ 226.669508] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
+[ 226.670652] RIP: 0010:proc_register+0x2a9/0x3e0
+[ 226.671612] Code: 89 fa 48 c1 ea 03 80 3c 02 00 0f 85 39 01 00 00 48 8b 04 24 48 89 ea 48 c7 c7 a0 0b 14 9f 48 8b b0 e
+0 00 00 00 e8 07 e7 88 ff <0f> 0b 48 c7 c7 40 2d a5 9f e8 59 d6 23 01 48 8b 4c 24 10 48 b8 00
+[ 226.675007] RSP: 0018:ffff888050e17078 EFLAGS: 00010282
+[ 226.675761] RAX: dffffc0000000008 RBX: ffff88805fdd0f10 RCX: ffffffff9dd344e2
+[ 226.676757] RDX: 0000000000000001 RSI: 0000000000000008 RDI: ffff88806c9f6b8c
+[ 226.677751] RBP: ffff8880507160f3 R08: ffffed100d940019 R09: ffffed100d940019
+[ 226.678761] R10: 0000000000000001 R11: ffffed100d940018 R12: ffff888050716008
+[ 226.679757] R13: ffff8880507160f2 R14: dffffc0000000000 R15: ffffed100a0e2c1e
+[ 226.680758] FS: 00007fdc217cc0c0(0000) GS:ffff88806c800000(0000) knlGS:0000000000000000
+[ 226.681886] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 226.682719] CR2: 00007f49313424d0 CR3: 0000000050e46001 CR4: 00000000000606f0
+[ 226.683727] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 226.684725] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 226.685681] Call Trace:
+[ 226.687089] proc_create_seq_private+0xb3/0xf0
+[ 226.687778] bond_create_proc_entry+0x1b3/0x3f0 [bonding]
+[ 226.691458] bond_netdev_event+0x433/0x970 [bonding]
+[ 226.692139] ? __module_text_address+0x13/0x140
+[ 226.692779] notifier_call_chain+0x90/0x160
+[ 226.693401] register_netdevice+0x9b3/0xd80
+[ 226.694010] ? alloc_netdev_mqs+0x854/0xc10
+[ 226.694629] ? netdev_change_features+0xa0/0xa0
+[ 226.695278] ? rtnl_create_link+0x2ed/0xad0
+[ 226.695849] bond_newlink+0x2a/0x60 [bonding]
+[ 226.696422] __rtnl_newlink+0xb9f/0x11b0
+[ 226.696968] ? rtnl_link_unregister+0x220/0x220
+[ ... ]
+
+Fixes: 0b680e753724 ("[PATCH] bonding: Add priv_flag to avoid event mishandling")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/bonding/bond_main.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1816,7 +1816,8 @@ err_detach:
+ slave_disable_netpoll(new_slave);
+
+ err_close:
+- slave_dev->priv_flags &= ~IFF_BONDING;
++ if (!netif_is_bond_master(slave_dev))
++ slave_dev->priv_flags &= ~IFF_BONDING;
+ dev_close(slave_dev);
+
+ err_restore_mac:
+@@ -2017,7 +2018,8 @@ static int __bond_release_one(struct net
+ else
+ dev_set_mtu(slave_dev, slave->original_mtu);
+
+- slave_dev->priv_flags &= ~IFF_BONDING;
++ if (!netif_is_bond_master(slave_dev))
++ slave_dev->priv_flags &= ~IFF_BONDING;
+
+ bond_free_slave(slave);
+
diff --git a/patches.suse/bonding-fix-using-uninitialized-mode_lock.patch b/patches.suse/bonding-fix-using-uninitialized-mode_lock.patch
new file mode 100644
index 0000000000..66eaec99ce
--- /dev/null
+++ b/patches.suse/bonding-fix-using-uninitialized-mode_lock.patch
@@ -0,0 +1,106 @@
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Tue, 29 Oct 2019 09:12:32 +0000
+Subject: bonding: fix using uninitialized mode_lock
+Patch-mainline: v5.4-rc6
+Git-commit: ad9bd8daf2f9938572b0604e1280fefa8f338581
+References: bsc#1154353
+
+When a bonding interface is being created, it sets up its mode and options.
+At that moment it takes mode_lock, so mode_lock must be initialized
+before then.
+
+rtnl_newlink()
+ rtnl_create_link()
+ alloc_netdev_mqs()
+ ->setup() //bond_setup()
+ ->newlink //bond_newlink
+ bond_changelink()
+ register_netdevice()
+ ->ndo_init() //bond_init()
+
+After commit 089bca2caed0 ("bonding: use dynamic lockdep key instead of
+subclass"), mode_lock is initialized in bond_init().
+So in bond_changelink(), an uninitialized mode_lock can be used.
+mode_lock should be initialized in bond_setup().
+This patch partially reverts commit 089bca2caed0 ("bonding: use dynamic
+lockdep key instead of subclass")
+
+Test command:
+ ip link add bond0 type bond mode 802.3ad lacp_rate 0
+
+Splat looks like:
+[ 60.615127] INFO: trying to register non-static key.
+[ 60.615900] the code is fine but needs lockdep annotation.
+[ 60.616697] turning off the locking correctness validator.
+[ 60.617490] CPU: 1 PID: 957 Comm: ip Not tainted 5.4.0-rc3+ #109
+[ 60.618350] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
+[ 60.619481] Call Trace:
+[ 60.619918] dump_stack+0x7c/0xbb
+[ 60.620453] register_lock_class+0x1215/0x14d0
+[ 60.621131] ? alloc_netdev_mqs+0x7b3/0xcc0
+[ 60.621771] ? is_bpf_text_address+0x86/0xf0
+[ 60.622416] ? is_dynamic_key+0x230/0x230
+[ 60.623032] ? unwind_get_return_address+0x5f/0xa0
+[ 60.623757] ? create_prof_cpu_mask+0x20/0x20
+[ 60.624408] ? arch_stack_walk+0x83/0xb0
+[ 60.625023] __lock_acquire+0xd8/0x3de0
+[ 60.625616] ? stack_trace_save+0x82/0xb0
+[ 60.626225] ? stack_trace_consume_entry+0x160/0x160
+[ 60.626957] ? deactivate_slab.isra.80+0x2c5/0x800
+[ 60.627668] ? register_lock_class+0x14d0/0x14d0
+[ 60.628380] ? alloc_netdev_mqs+0x7b3/0xcc0
+[ 60.629020] ? save_stack+0x69/0x80
+[ 60.629574] ? save_stack+0x19/0x80
+[ 60.630121] ? __kasan_kmalloc.constprop.4+0xa0/0xd0
+[ 60.630859] ? __kmalloc_node+0x16f/0x480
+[ 60.631472] ? alloc_netdev_mqs+0x7b3/0xcc0
+[ 60.632121] ? rtnl_create_link+0x2ed/0xad0
+[ 60.634388] ? __rtnl_newlink+0xad4/0x11b0
+[ 60.635024] lock_acquire+0x164/0x3b0
+[ 60.635608] ? bond_3ad_update_lacp_rate+0x91/0x200 [bonding]
+[ 60.636463] _raw_spin_lock_bh+0x38/0x70
+[ 60.637084] ? bond_3ad_update_lacp_rate+0x91/0x200 [bonding]
+[ 60.637930] bond_3ad_update_lacp_rate+0x91/0x200 [bonding]
+[ 60.638753] ? bond_3ad_lacpdu_recv+0xb30/0xb30 [bonding]
+[ 60.639552] ? bond_opt_get_val+0x180/0x180 [bonding]
+[ 60.640307] ? ___slab_alloc+0x5aa/0x610
+[ 60.640925] bond_option_lacp_rate_set+0x71/0x140 [bonding]
+[ 60.641751] __bond_opt_set+0x1ff/0xbb0 [bonding]
+[ 60.643217] ? kasan_unpoison_shadow+0x30/0x40
+[ 60.643924] bond_changelink+0x9a4/0x1700 [bonding]
+[ 60.644653] ? memset+0x1f/0x40
+[ 60.742941] ? bond_slave_changelink+0x1a0/0x1a0 [bonding]
+[ 60.752694] ? alloc_netdev_mqs+0x8ea/0xcc0
+[ 60.753330] ? rtnl_create_link+0x2ed/0xad0
+[ 60.753964] bond_newlink+0x1e/0x60 [bonding]
+[ 60.754612] __rtnl_newlink+0xb9f/0x11b0
+[ ... ]
+
+Reported-by: syzbot+8da67f407bcba2c72e6e@syzkaller.appspotmail.com
+Reported-by: syzbot+0d083911ab18b710da71@syzkaller.appspotmail.com
+Fixes: 089bca2caed0 ("bonding: use dynamic lockdep key instead of subclass")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/bonding/bond_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4284,6 +4284,7 @@ void bond_setup(struct net_device *bond_
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
+
++ spin_lock_init(&bond->mode_lock);
+ bond->params = bonding_defaults;
+
+ /* Initialize pointers */
+@@ -4756,7 +4757,6 @@ static int bond_init(struct net_device *
+ if (!bond->wq)
+ return -ENOMEM;
+
+- spin_lock_init(&bond->mode_lock);
+ spin_lock_init(&bond->stats_lock);
+ lockdep_register_key(&bond->stats_lock_key);
+ lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
diff --git a/patches.suse/bonding-use-dynamic-lockdep-key-instead-of-subclass.patch b/patches.suse/bonding-use-dynamic-lockdep-key-instead-of-subclass.patch
new file mode 100644
index 0000000000..a362f22025
--- /dev/null
+++ b/patches.suse/bonding-use-dynamic-lockdep-key-instead-of-subclass.patch
@@ -0,0 +1,131 @@
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Mon, 21 Oct 2019 18:47:53 +0000
+Subject: bonding: use dynamic lockdep key instead of subclass
+Patch-mainline: v5.4-rc6
+Git-commit: 089bca2caed0d0dea7da235ce1fe245808f5ec02
+References: bsc#1154353
+
+All bonding devices share the same lockdep key, and the subclass is
+initialized with nest_level.
+But the actual nest_level value can change when a lower device is attached,
+and at that moment the subclass should be updated, which seems to be
+unsafe.
+So this patch makes bonding use a dynamic lockdep key instead of the
+subclass.
+
+Test commands:
+ ip link add bond0 type bond
+
+ for i in {1..5}
+ do
+ let A=$i-1
+ ip link add bond$i type bond
+ ip link set bond$i master bond$A
+ done
+ ip link set bond5 master bond0
+
+Splat looks like:
+[ 307.992912] WARNING: possible recursive locking detected
+[ 307.993656] 5.4.0-rc3+ #96 Tainted: G W
+[ 307.994367] --------------------------------------------
+[ 307.995092] ip/761 is trying to acquire lock:
+[ 307.995710] ffff8880513aac60 (&(&bond->stats_lock)->rlock#2/2){+.+.}, at: bond_get_stats+0xb8/0x500 [bonding]
+[ 307.997045]
+ but task is already holding lock:
+[ 307.997923] ffff88805fcbac60 (&(&bond->stats_lock)->rlock#2/2){+.+.}, at: bond_get_stats+0xb8/0x500 [bonding]
+[ 307.999215]
+ other info that might help us debug this:
+[ 308.000251] Possible unsafe locking scenario:
+
+[ 308.001137] CPU0
+[ 308.001533] ----
+[ 308.001915] lock(&(&bond->stats_lock)->rlock#2/2);
+[ 308.002609] lock(&(&bond->stats_lock)->rlock#2/2);
+[ 308.003302]
+ *** DEADLOCK ***
+
+[ 308.004310] May be due to missing lock nesting notation
+
+[ 308.005319] 3 locks held by ip/761:
+[ 308.005830] #0: ffffffff9fcc42b0 (rtnl_mutex){+.+.}, at: rtnetlink_rcv_msg+0x466/0x8a0
+[ 308.006894] #1: ffff88805fcbac60 (&(&bond->stats_lock)->rlock#2/2){+.+.}, at: bond_get_stats+0xb8/0x500 [bonding]
+[ 308.008243] #2: ffffffff9f9219c0 (rcu_read_lock){....}, at: bond_get_stats+0x9f/0x500 [bonding]
+[ 308.009422]
+ stack backtrace:
+[ 308.010124] CPU: 0 PID: 761 Comm: ip Tainted: G W 5.4.0-rc3+ #96
+[ 308.011097] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
+[ 308.012179] Call Trace:
+[ 308.012601] dump_stack+0x7c/0xbb
+[ 308.013089] __lock_acquire+0x269d/0x3de0
+[ 308.013669] ? register_lock_class+0x14d0/0x14d0
+[ 308.014318] lock_acquire+0x164/0x3b0
+[ 308.014858] ? bond_get_stats+0xb8/0x500 [bonding]
+[ 308.015520] _raw_spin_lock_nested+0x2e/0x60
+[ 308.016129] ? bond_get_stats+0xb8/0x500 [bonding]
+[ 308.017215] bond_get_stats+0xb8/0x500 [bonding]
+[ 308.018454] ? bond_arp_rcv+0xf10/0xf10 [bonding]
+[ 308.019710] ? rcu_read_lock_held+0x90/0xa0
+[ 308.020605] ? rcu_read_lock_sched_held+0xc0/0xc0
+[ 308.021286] ? bond_get_stats+0x9f/0x500 [bonding]
+[ 308.021953] dev_get_stats+0x1ec/0x270
+[ 308.022508] bond_get_stats+0x1d1/0x500 [bonding]
+
+Fixes: d3fff6c443fe ("net: add netdev_lockdep_set_classes() helper")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/bonding/bond_main.c | 10 +++++++---
+ include/net/bonding.h | 1 +
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3459,7 +3459,7 @@ static void bond_get_stats(struct net_de
+ struct list_head *iter;
+ struct slave *slave;
+
+- spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
++ spin_lock(&bond->stats_lock);
+ memcpy(stats, &bond->bond_stats, sizeof(*stats));
+
+ rcu_read_lock();
+@@ -4297,8 +4297,6 @@ void bond_setup(struct net_device *bond_
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
+
+- spin_lock_init(&bond->mode_lock);
+- spin_lock_init(&bond->stats_lock);
+ bond->params = bonding_defaults;
+
+ /* Initialize pointers */
+@@ -4367,6 +4365,7 @@ static void bond_uninit(struct net_devic
+
+ list_del(&bond->bond_list);
+
++ lockdep_unregister_key(&bond->stats_lock_key);
+ bond_debug_unregister(bond);
+ }
+
+@@ -4772,6 +4771,11 @@ static int bond_init(struct net_device *
+
+ bond->nest_level = SINGLE_DEPTH_NESTING;
+
++ spin_lock_init(&bond->mode_lock);
++ spin_lock_init(&bond->stats_lock);
++ lockdep_register_key(&bond->stats_lock_key);
++ lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
++
+ list_add_tail(&bond->bond_list, &bn->dev_list);
+
+ bond_prepare_sysfs_group(bond);
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -239,6 +239,7 @@ struct bonding {
+ struct dentry *debug_dir;
+ #endif /* CONFIG_DEBUG_FS */
+ struct rtnl_link_stats64 bond_stats;
++ struct lock_class_key stats_lock_key;
+ };
+
+ #define bond_slave_get_rcu(dev) \
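The approach adopted here is the standard one for locks whose nesting depth is only known at runtime: give each instance its own lock_class_key rather than a fixed subclass. A hedged sketch of the register/set/unregister lifecycle (struct name illustrative):

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    struct stacked_dev {
        spinlock_t lock;
        struct lock_class_key key;  /* one lockdep class per instance */
    };

    static void stacked_dev_init(struct stacked_dev *d)
    {
        spin_lock_init(&d->lock);
        lockdep_register_key(&d->key);
        lockdep_set_class(&d->lock, &d->key);
    }

    static void stacked_dev_destroy(struct stacked_dev *d)
    {
        /* the key must be unregistered before its memory is freed */
        lockdep_unregister_key(&d->key);
    }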
diff --git a/patches.suse/bpf-lwtunnel-Fix-reroute-supplying-invalid-dst.patch b/patches.suse/bpf-lwtunnel-Fix-reroute-supplying-invalid-dst.patch
new file mode 100644
index 0000000000..e0431a693d
--- /dev/null
+++ b/patches.suse/bpf-lwtunnel-Fix-reroute-supplying-invalid-dst.patch
@@ -0,0 +1,50 @@
+From: Jiri Benc <jbenc@redhat.com>
+Date: Wed, 9 Oct 2019 10:31:24 +0200
+Subject: bpf: lwtunnel: Fix reroute supplying invalid dst
+Patch-mainline: v5.4-rc6
+Git-commit: 9e8acd9c44a0dd52b2922eeb82398c04e356c058
+References: bsc#1154353
+
+The dst in bpf_input() has the lwtstate field set. As it is of the
+LWTUNNEL_ENCAP_BPF type, lwtstate->data is struct bpf_lwt. When the bpf
+program returns BPF_LWT_REROUTE, ip_route_input_noref is called directly on
+this skb. This causes invalid memory access, as ip_route_input_slow calls
+skb_tunnel_info(skb), which expects dst->lwtstate->data to be
+struct ip_tunnel_info. This results in struct bpf_lwt being accessed as
+struct ip_tunnel_info.
+
+Drop the dst before calling the IP route input functions (both for IPv4 and
+IPv6).
+
+Reported by KASAN.
+
+Fixes: 3bd0b15281af ("bpf: add handling of BPF_LWT_REROUTE to lwt_bpf.c")
+Signed-off-by: Jiri Benc <jbenc@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Peter Oskolkov <posk@google.com>
+Link: https://lore.kernel.org/bpf/111664d58fe4e9dd9c8014bb3d0b2dab93086a9e.1570609794.git.jbenc@redhat.com
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/core/lwt_bpf.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/net/core/lwt_bpf.c
++++ b/net/core/lwt_bpf.c
+@@ -88,11 +88,16 @@ static int bpf_lwt_input_reroute(struct
+ int err = -EINVAL;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
++ struct net_device *dev = skb_dst(skb)->dev;
+ struct iphdr *iph = ip_hdr(skb);
+
++ dev_hold(dev);
++ skb_dst_drop(skb);
+ err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+- iph->tos, skb_dst(skb)->dev);
++ iph->tos, dev);
++ dev_put(dev);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
++ skb_dst_drop(skb);
+ err = ipv6_stub->ipv6_route_input(skb);
+ } else {
+ err = -EAFNOSUPPORT;
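The ordering in the fix matters: the device pointer is borrowed from the dst that is about to be dropped, so the code pins it with dev_hold() first, drops the dst, does the lookup, then releases the pin. A minimal sketch of that borrow-then-drop pattern; route_input() is a hypothetical stand-in for the real lookup:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <net/dst.h>

    int route_input(struct sk_buff *skb, struct net_device *dev); /* hypothetical */

    static int reroute_example(struct sk_buff *skb)
    {
        struct net_device *dev = skb_dst(skb)->dev;
        int err;

        dev_hold(dev);     /* keep dev alive across skb_dst_drop() */
        skb_dst_drop(skb); /* the old dst and its lwtstate are gone */
        err = route_input(skb, dev);
        dev_put(dev);
        return err;
    }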
diff --git a/patches.suse/cxgb4-fix-panic-when-attaching-to-ULD-fail.patch b/patches.suse/cxgb4-fix-panic-when-attaching-to-ULD-fail.patch
new file mode 100644
index 0000000000..da2a10ab6f
--- /dev/null
+++ b/patches.suse/cxgb4-fix-panic-when-attaching-to-ULD-fail.patch
@@ -0,0 +1,90 @@
+From: Vishal Kulkarni <vishal@chelsio.com>
+Date: Wed, 30 Oct 2019 20:17:57 +0530
+Subject: cxgb4: fix panic when attaching to ULD fail
+Patch-mainline: v5.4-rc6
+Git-commit: fc89cc358fb64e2429aeae0f37906126636507ec
+References: jsc#SLE-8389
+
+Release resources when attaching to the ULD fails. Otherwise, a data
+mismatch is seen between the LLD and ULD later on, which leads to a
+kernel panic when accessing resources that should not even
+exist in the first place.
+
+Fixes: 94cdb8bb993a ("cxgb4: Add support for dynamic allocation of resources for ULD")
+Signed-off-by: Shahjada Abul Husain <shahjada@chelsio.com>
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 28 ++++++++++++++-----------
+ 1 file changed, 16 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+@@ -695,10 +695,10 @@ static void uld_init(struct adapter *ada
+ lld->write_cmpl_support = adap->params.write_cmpl_support;
+ }
+
+-static void uld_attach(struct adapter *adap, unsigned int uld)
++static int uld_attach(struct adapter *adap, unsigned int uld)
+ {
+- void *handle;
+ struct cxgb4_lld_info lli;
++ void *handle;
+
+ uld_init(adap, &lli);
+ uld_queue_init(adap, uld, &lli);
+@@ -708,7 +708,7 @@ static void uld_attach(struct adapter *a
+ dev_warn(adap->pdev_dev,
+ "could not attach to the %s driver, error %ld\n",
+ adap->uld[uld].name, PTR_ERR(handle));
+- return;
++ return PTR_ERR(handle);
+ }
+
+ adap->uld[uld].handle = handle;
+@@ -716,22 +716,22 @@ static void uld_attach(struct adapter *a
+
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
++
++ return 0;
+ }
+
+-/**
+- * cxgb4_register_uld - register an upper-layer driver
+- * @type: the ULD type
+- * @p: the ULD methods
++/* cxgb4_register_uld - register an upper-layer driver
++ * @type: the ULD type
++ * @p: the ULD methods
+ *
+- * Registers an upper-layer driver with this driver and notifies the ULD
+- * about any presently available devices that support its type. Returns
+- * %-EBUSY if a ULD of the same type is already registered.
++ * Registers an upper-layer driver with this driver and notifies the ULD
++ * about any presently available devices that support its type.
+ */
+ void cxgb4_register_uld(enum cxgb4_uld type,
+ const struct cxgb4_uld_info *p)
+ {
+- int ret = 0;
+ struct adapter *adap;
++ int ret = 0;
+
+ if (type >= CXGB4_ULD_MAX)
+ return;
+@@ -763,8 +763,12 @@ void cxgb4_register_uld(enum cxgb4_uld t
+ if (ret)
+ goto free_irq;
+ adap->uld[type] = *p;
+- uld_attach(adap, type);
++ ret = uld_attach(adap, type);
++ if (ret)
++ goto free_txq;
+ continue;
++free_txq:
++ release_sge_txq_uld(adap, type);
+ free_irq:
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
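The fix slots a new unwind stage into the function's existing goto ladder: once attaching fails, the TX queues allocated just before must be released before falling through to the IRQ cleanup. A compact sketch of the idiom with hypothetical helpers:

    struct adapter_state;                           /* opaque, illustrative */
    int alloc_uld_queues(struct adapter_state *st); /* hypothetical helpers */
    int request_uld_irqs(struct adapter_state *st);
    int attach_uld_driver(struct adapter_state *st);
    void free_uld_irqs(struct adapter_state *st);
    void free_uld_queues(struct adapter_state *st);

    static int setup_uld_example(struct adapter_state *st)
    {
        int ret;

        ret = alloc_uld_queues(st);
        if (ret)
            return ret;
        ret = request_uld_irqs(st);
        if (ret)
            goto err_queues;
        ret = attach_uld_driver(st);
        if (ret)
            goto err_irqs;   /* the fix adds this unwind step */
        return 0;

    err_irqs:
        free_uld_irqs(st);
    err_queues:
        free_uld_queues(st);
        return ret;
    }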
diff --git a/patches.suse/cxgb4-request-the-TX-CIDX-updates-to-status-page.patch b/patches.suse/cxgb4-request-the-TX-CIDX-updates-to-status-page.patch
new file mode 100644
index 0000000000..ab8f84cda1
--- /dev/null
+++ b/patches.suse/cxgb4-request-the-TX-CIDX-updates-to-status-page.patch
@@ -0,0 +1,48 @@
+From: Raju Rangoju <rajur@chelsio.com>
+Date: Wed, 23 Oct 2019 23:03:55 +0530
+Subject: cxgb4: request the TX CIDX updates to status page
+Patch-mainline: v5.4-rc6
+Git-commit: 7c3bebc3d8688b84795c11848c314a2fbfe045e0
+References: jsc#SLE-8389
+
+For adapters which support the SGE Doorbell Queue Timer facility,
+we configured the Ethernet TX Queues to send CIDX Updates to the
+Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
+messages to allow us to respond more quickly to the CIDX Updates.
+But, this was adding load to PCIe Link RX bandwidth and,
+potentially, resulting in higher CPU Interrupt load.
+
+This patch requests the HW to deliver the CIDX updates to the TX
+queue status page rather than generating an ingress queue message
+(as an interrupt). With this patch, the load on RX bandwidth is
+reduced and a substantial improvement in BW is noticed at lower
+IO sizes.
+
+Fixes: d429005fdf2c ("cxgb4/cxgb4vf: Add support for SGE doorbell queue timer")
+Signed-off-by: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/sge.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -3791,15 +3791,11 @@ int t4_sge_alloc_eth_txq(struct adapter
+ * write the CIDX Updates into the Status Page at the end of the
+ * TX Queue.
+ */
+- c.autoequiqe_to_viid = htonl((dbqt
+- ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
+- : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
++ c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+ FW_EQ_ETH_CMD_VIID_V(pi->viid));
+
+ c.fetchszm_to_iqid =
+- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
+- ? HOSTFCMODE_INGRESS_QUEUE_X
+- : HOSTFCMODE_STATUS_PAGE_X) |
++ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+
diff --git a/patches.suse/e1000-fix-memory-leaks.patch b/patches.suse/e1000-fix-memory-leaks.patch
new file mode 100644
index 0000000000..56a12388aa
--- /dev/null
+++ b/patches.suse/e1000-fix-memory-leaks.patch
@@ -0,0 +1,54 @@
+From: Wenwen Wang <wenwen@cs.uga.edu>
+Date: Mon, 12 Aug 2019 00:59:21 -0500
+Subject: e1000: fix memory leaks
+Patch-mainline: v5.4-rc6
+Git-commit: 8472ba62154058b64ebb83d5f57259a352d28697
+References: jsc#SLE-8100
+
+In e1000_set_ringparam(), 'tx_old' and 'rx_old' are not deallocated if
+e1000_up() fails, leading to memory leaks. Refactor the code to fix this
+issue.
+
+Signed-off-by: Wenwen Wang <wenwen@cs.uga.edu>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+@@ -607,6 +607,7 @@ static int e1000_set_ringparam(struct ne
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rxdr[i].count = rxdr->count;
+
++ err = 0;
+ if (netif_running(adapter->netdev)) {
+ /* Try to get new resources before deleting old */
+ err = e1000_setup_all_rx_resources(adapter);
+@@ -627,14 +628,13 @@ static int e1000_set_ringparam(struct ne
+ adapter->rx_ring = rxdr;
+ adapter->tx_ring = txdr;
+ err = e1000_up(adapter);
+- if (err)
+- goto err_setup;
+ }
+ kfree(tx_old);
+ kfree(rx_old);
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+- return 0;
++ return err;
++
+ err_setup_tx:
+ e1000_free_all_rx_resources(adapter);
+ err_setup_rx:
+@@ -646,7 +646,6 @@ err_alloc_rx:
+ err_alloc_tx:
+ if (netif_running(adapter->netdev))
+ e1000_up(adapter);
+-err_setup:
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return err;
+ }
diff --git a/patches.suse/erspan-fix-the-tun_info-options_len-check-for-erspan.patch b/patches.suse/erspan-fix-the-tun_info-options_len-check-for-erspan.patch
new file mode 100644
index 0000000000..84be588d62
--- /dev/null
+++ b/patches.suse/erspan-fix-the-tun_info-options_len-check-for-erspan.patch
@@ -0,0 +1,49 @@
+From: Xin Long <lucien.xin@gmail.com>
+Date: Mon, 28 Oct 2019 23:19:35 +0800
+Subject: erspan: fix the tun_info options_len check for erspan
+Patch-mainline: v5.4-rc6
+Git-commit: 2eb8d6d2910cfe3dc67dc056f26f3dd9c63d47cd
+References: bsc#1154353
+
+The check for !md doesn't really work for ip_tunnel_info_opts(info), which
+only does info + 1. Also, to avoid out-of-bounds access on info, it should
+ensure options_len is not less than sizeof(struct erspan_metadata) in both
+erspan_xmit() and ip6erspan_tunnel_xmit().
+
+Fixes: 1a66a836da ("gre: add collect_md mode to ERSPAN tunnel")
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/ipv4/ip_gre.c | 4 ++--
+ net/ipv6/ip6_gre.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -509,9 +509,9 @@ static void erspan_fb_xmit(struct sk_buf
+ key = &tun_info->key;
+ if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
+ goto err_free_skb;
+- md = ip_tunnel_info_opts(tun_info);
+- if (!md)
++ if (tun_info->options_len < sizeof(*md))
+ goto err_free_skb;
++ md = ip_tunnel_info_opts(tun_info);
+
+ /* ERSPAN has fixed 8 byte GRE header */
+ version = md->version;
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -980,9 +980,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit
+ dsfield = key->tos;
+ if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
+ goto tx_err;
+- md = ip_tunnel_info_opts(tun_info);
+- if (!md)
++ if (tun_info->options_len < sizeof(*md))
+ goto tx_err;
++ md = ip_tunnel_info_opts(tun_info);
+
+ tun_id = tunnel_id_to_key32(key->tun_id);
+ if (md->version == 1) {
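The corrected order validates the length before dereferencing: ip_tunnel_info_opts() merely computes info + 1, so it can never return NULL, and the only meaningful check is that options_len covers the metadata about to be read. A minimal sketch of the pattern (struct and helper names illustrative):

    #include <linux/errno.h>
    #include <linux/types.h>

    struct md_info {
        u16 options_len;
        /* variable-length options follow the fixed part */
    };

    /* Like ip_tunnel_info_opts(): pure pointer arithmetic, never NULL,
     * so a !ptr check cannot catch a too-short buffer. */
    static void *md_opts(struct md_info *info)
    {
        return info + 1;
    }

    static int use_opts(struct md_info *info, size_t need)
    {
        if (info->options_len < need)   /* validate the length first */
            return -EINVAL;
        /* ... 'need' bytes at md_opts(info) are now safe to read ... */
        return 0;
    }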
diff --git a/patches.suse/i40e-Fix-receive-buffer-starvation-for-AF_XDP.patch b/patches.suse/i40e-Fix-receive-buffer-starvation-for-AF_XDP.patch
new file mode 100644
index 0000000000..f8553df5b0
--- /dev/null
+++ b/patches.suse/i40e-Fix-receive-buffer-starvation-for-AF_XDP.patch
@@ -0,0 +1,35 @@
+From: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Date: Mon, 7 Oct 2019 15:07:24 -0700
+Subject: i40e: Fix receive buffer starvation for AF_XDP
+Patch-mainline: v5.4-rc6
+Git-commit: 2c19e395e061a1c1442e0623ce5ec88ecc6c5a9b
+References: jsc#SLE-8025
+
+Magnus's fix to resolve a potential receive buffer starvation for AF_XDP
+got applied to both the i40e_xsk_umem_enable/disable() functions, when it
+should have only been applied to the "enable". So clean up the undesired
+code in the disable function.
+
+CC: Magnus Karlsson <magnus.karlsson@intel.com>
+Fixes: 1f459bdc2007 ("i40e: fix potential RX buffer starvation for AF_XDP")
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/i40e/i40e_xsk.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -157,11 +157,6 @@ static int i40e_xsk_umem_disable(struct
+ err = i40e_queue_pair_enable(vsi, qid);
+ if (err)
+ return err;
+-
+- /* Kick start the NAPI context so that receiving will start */
+- err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
+- if (err)
+- return err;
+ }
+
+ return 0;
diff --git a/patches.suse/igb-Enable-media-autosense-for-the-i350.patch b/patches.suse/igb-Enable-media-autosense-for-the-i350.patch
new file mode 100644
index 0000000000..dc96673b1e
--- /dev/null
+++ b/patches.suse/igb-Enable-media-autosense-for-the-i350.patch
@@ -0,0 +1,43 @@
+From: Manfred Rudigier <manfred.rudigier@omicronenergy.com>
+Date: Thu, 15 Aug 2019 13:55:19 -0700
+Subject: igb: Enable media autosense for the i350.
+Patch-mainline: v5.4-rc6
+Git-commit: fb2308ba16bf1fd2cc3635172381e265fbfcb76d
+References: jsc#SLE-7967 jsc#SLE-8010
+
+This patch enables the hardware feature "Media Auto Sense" also on the
+i350. It works in the same way as on the 82580 devices. Hardware designs
+using dual PHYs (fiber/copper) can enable this feature by setting the MAS
+enable bits in the NVM_COMPAT register (0x03) in the EEPROM.
+
+Signed-off-by: Manfred Rudigier <manfred.rudigier@omicronenergy.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/igb/e1000_82575.c | 2 +-
+ drivers/net/ethernet/intel/igb/igb_main.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
+@@ -466,7 +466,7 @@ static s32 igb_init_mac_params_82575(str
+ ? igb_setup_copper_link_82575
+ : igb_setup_serdes_link_82575;
+
+- if (mac->type == e1000_82580) {
++ if (mac->type == e1000_82580 || mac->type == e1000_i350) {
+ switch (hw->device_id) {
+ /* feature not supported on these id's */
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -2371,7 +2371,7 @@ void igb_reset(struct igb_adapter *adapt
+ adapter->ei.get_invariants(hw);
+ adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+ }
+- if ((mac->type == e1000_82575) &&
++ if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ igb_enable_mas(adapter);
+ }
diff --git a/patches.suse/igb-Fix-constant-media-auto-sense-switching-when-no-.patch b/patches.suse/igb-Fix-constant-media-auto-sense-switching-when-no-.patch
new file mode 100644
index 0000000000..610540f06f
--- /dev/null
+++ b/patches.suse/igb-Fix-constant-media-auto-sense-switching-when-no-.patch
@@ -0,0 +1,41 @@
+From: Manfred Rudigier <manfred.rudigier@omicronenergy.com>
+Date: Thu, 15 Aug 2019 13:55:20 -0700
+Subject: igb: Fix constant media auto sense switching when no cable is
+ connected
+Patch-mainline: v5.4-rc6
+Git-commit: 8d5cfd7f76a2414e23c74bb8858af7540365d985
+References: jsc#SLE-7967 jsc#SLE-8010
+
+At least on the i350 there is an annoying behavior that may also be
+present on 82580 devices, but was probably not noticed yet, as MAS is not
+widely used.
+
+If no cable is connected to either the fiber or copper port, the media
+auto sense code will constantly swap between them as part of the watchdog
+task and produce many unnecessary kernel log messages.
+
+The swap code responsible for this behavior (switching to fiber) should
+not be executed if the current media type is copper and there is no signal
+detected on the fiber port. In this case we can safely wait until the
+AUTOSENSE_EN bit is cleared.
+
+Signed-off-by: Manfred Rudigier <manfred.rudigier@omicronenergy.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -2065,7 +2065,8 @@ static void igb_check_swap_media(struct
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+ swap_now = true;
+- } else if (!(connsw & E1000_CONNSW_SERDESD)) {
++ } else if ((hw->phy.media_type != e1000_media_type_copper) &&
++ !(connsw & E1000_CONNSW_SERDESD)) {
+ /* copper signal takes time to appear */
+ if (adapter->copper_tries < 4) {
+ adapter->copper_tries++;
diff --git a/patches.suse/igb-igc-Don-t-warn-on-fatal-read-failures-when-the-d.patch b/patches.suse/igb-igc-Don-t-warn-on-fatal-read-failures-when-the-d.patch
new file mode 100644
index 0000000000..f7a2e22ab2
--- /dev/null
+++ b/patches.suse/igb-igc-Don-t-warn-on-fatal-read-failures-when-the-d.patch
@@ -0,0 +1,114 @@
+From: Lyude Paul <lyude@redhat.com>
+Date: Thu, 22 Aug 2019 14:33:18 -0400
+Subject: igb/igc: Don't warn on fatal read failures when the device is removed
+Patch-mainline: v5.4-rc6
+Git-commit: 94bc1e522b32c866d85b5af0ede55026b585ae73
+References: jsc#SLE-7967 jsc#SLE-8010
+
+Fatal read errors are worth warning about, unless of course the device
+was just unplugged from the machine - something that's a rather normal
+occurrence when the igb/igc adapter is located on a Thunderbolt dock. So,
+let's only WARN() if there's a fatal read error while the device is
+still present.
+
+This fixes the following WARN splat that's been appearing whenever I
+unplug my Caldigit TS3 Thunderbolt dock from my laptop:
+
+ igb 0000:09:00.0 enp9s0: PCIe link lost
+ ------------[ cut here ]------------
+ igb: Failed to read reg 0x18!
+ WARNING: CPU: 7 PID: 516 at
+ drivers/net/ethernet/intel/igb/igb_main.c:756 igb_rd32+0x57/0x6a [igb]
+ Modules linked in: igb dca thunderbolt fuse vfat fat elan_i2c mei_wdt
+ mei_hdcp i915 wmi_bmof intel_wmi_thunderbolt iTCO_wdt
+ iTCO_vendor_support x86_pkg_temp_thermal intel_powerclamp joydev
+ coretemp crct10dif_pclmul crc32_pclmul i2c_algo_bit ghash_clmulni_intel
+ intel_cstate drm_kms_helper intel_uncore syscopyarea sysfillrect
+ sysimgblt fb_sys_fops intel_rapl_perf intel_xhci_usb_role_switch mei_me
+ drm roles idma64 i2c_i801 ucsi_acpi typec_ucsi mei intel_lpss_pci
+ processor_thermal_device typec intel_pch_thermal intel_soc_dts_iosf
+ intel_lpss int3403_thermal thinkpad_acpi wmi int340x_thermal_zone
+ ledtrig_audio int3400_thermal acpi_thermal_rel acpi_pad video
+ pcc_cpufreq ip_tables serio_raw nvme nvme_core crc32c_intel uas
+ usb_storage e1000e i2c_dev
+ CPU: 7 PID: 516 Comm: kworker/u16:3 Not tainted 5.2.0-rc1Lyude-Test+ #14
+ Hardware name: LENOVO 20L8S2N800/20L8S2N800, BIOS N22ET35W (1.12 ) 04/09/2018
+ Workqueue: kacpi_hotplug acpi_hotplug_work_fn
+ RIP: 0010:igb_rd32+0x57/0x6a [igb]
+ Code: 87 b8 fc ff ff 48 c7 47 08 00 00 00 00 48 c7 c6 33 42 9b c0 4c 89
+ c7 e8 47 45 cd dc 89 ee 48 c7 c7 43 42 9b c0 e8 c1 94 71 dc <0f> 0b eb
+ 08 8b 00 ff c0 75 b0 eb c8 44 89 e0 5d 41 5c c3 0f 1f 44
+ RSP: 0018:ffffba5801cf7c48 EFLAGS: 00010286
+ RAX: 0000000000000000 RBX: ffff9e7956608840 RCX: 0000000000000007
+ RDX: 0000000000000000 RSI: ffffba5801cf7b24 RDI: ffff9e795e3d6a00
+ RBP: 0000000000000018 R08: 000000009dec4a01 R09: ffffffff9e61018f
+ R10: 0000000000000000 R11: ffffba5801cf7ae5 R12: 00000000ffffffff
+ R13: ffff9e7956608840 R14: ffff9e795a6f10b0 R15: 0000000000000000
+ FS: 0000000000000000(0000) GS:ffff9e795e3c0000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000564317bc4088 CR3: 000000010e00a006 CR4: 00000000003606e0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ igb_release_hw_control+0x1a/0x30 [igb]
+ igb_remove+0xc5/0x14b [igb]
+ pci_device_remove+0x3b/0x93
+ device_release_driver_internal+0xd7/0x17e
+ pci_stop_bus_device+0x36/0x75
+ pci_stop_bus_device+0x66/0x75
+ pci_stop_bus_device+0x66/0x75
+ pci_stop_and_remove_bus_device+0xf/0x19
+ trim_stale_devices+0xc5/0x13a
+ ? __pm_runtime_resume+0x6e/0x7b
+ trim_stale_devices+0x103/0x13a
+ ? __pm_runtime_resume+0x6e/0x7b
+ trim_stale_devices+0x103/0x13a
+ acpiphp_check_bridge+0xd8/0xf5
+ acpiphp_hotplug_notify+0xf7/0x14b
+ ? acpiphp_check_bridge+0xf5/0xf5
+ acpi_device_hotplug+0x357/0x3b5
+ acpi_hotplug_work_fn+0x1a/0x23
+ process_one_work+0x1a7/0x296
+ worker_thread+0x1a8/0x24c
+ ? process_scheduled_works+0x2c/0x2c
+ kthread+0xe9/0xee
+ ? kthread_destroy_worker+0x41/0x41
+ ret_from_fork+0x35/0x40
+ ---[ end trace 252bf10352c63d22 ]---
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 47e16692b26b ("igb/igc: warn when fatal read failure happens")
+Acked-by: Sasha Neftin <sasha.neftin@intel.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Acked-by: Feng Tang <feng.tang@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 3 ++-
+ drivers/net/ethernet/intel/igc/igc_main.c | 3 ++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -753,7 +753,8 @@ u32 igb_rd32(struct e1000_hw *hw, u32 re
+ struct net_device *netdev = igb->netdev;
+ hw->hw_addr = NULL;
+ netdev_err(netdev, "PCIe link lost\n");
+- WARN(1, "igb: Failed to read reg 0x%x!\n", reg);
++ WARN(pci_device_is_present(igb->pdev),
++ "igb: Failed to read reg 0x%x!\n", reg);
+ }
+
+ return value;
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -4047,7 +4047,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
+ hw->hw_addr = NULL;
+ netif_device_detach(netdev);
+ netdev_err(netdev, "PCIe link lost, device now detached\n");
+- WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
++ WARN(pci_device_is_present(igc->pdev),
++ "igc: Failed to read reg 0x%x!\n", reg);
+ }
+
+ return value;
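WARN(cond, ...) only emits the splat when cond is true, so gating it on pci_device_is_present() keeps the warning for genuine register-read failures while staying silent across surprise removal. A hedged sketch of the idea (helper name and the all-ones check are illustrative, not the driver's exact logic):

    #include <linux/bug.h>
    #include <linux/io.h>
    #include <linux/pci.h>

    static u32 read_reg_checked(struct pci_dev *pdev, void __iomem *base,
                                u32 reg)
    {
        u32 value = readl(base + reg);

        if (value == ~0U)       /* all-ones read often means link loss */
            WARN(pci_device_is_present(pdev),
                 "failed to read reg 0x%x!\n", reg);
        return value;
    }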
diff --git a/patches.suse/inet-stop-leaking-jiffies-on-the-wire.patch b/patches.suse/inet-stop-leaking-jiffies-on-the-wire.patch
new file mode 100644
index 0000000000..7807c0c278
--- /dev/null
+++ b/patches.suse/inet-stop-leaking-jiffies-on-the-wire.patch
@@ -0,0 +1,104 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 1 Nov 2019 10:32:19 -0700
+Subject: inet: stop leaking jiffies on the wire
+Patch-mainline: v5.4-rc6
+Git-commit: a904a0693c189691eeee64f6c6b188bd7dc244e9
+References: bsc#1154353
+
+Historically, Linux tried to stick to RFCs 791, 1122 and 2003
+for IPv4 ID field generation.
+
+RFC 6864 made clear that no matter how hard we try,
+we cannot ensure uniqueness of the IP ID within the maximum
+lifetime for all datagrams with a given source
+address/destination address/protocol tuple.
+
+Linux uses a per-socket inet generator (inet_id), initialized
+at connection startup with an XOR of 'jiffies' and other
+fields that appear in the clear on the wire.
+
+Thiemo Nagel pointed out that this strategy is a privacy
+concern, as it provides 16 bits of entropy to fingerprint
+devices.
+
+Let's switch to a random starting point; this is just as
+good as far as RFC 6864 is concerned and does not leak
+anything critical.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Thiemo Nagel <tnagel@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chtls/chtls_cm.c | 2 +-
+ net/dccp/ipv4.c | 2 +-
+ net/ipv4/datagram.c | 2 +-
+ net/ipv4/tcp_ipv4.c | 4 ++--
+ net/sctp/socket.c | 2 +-
+ 5 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -1297,7 +1297,7 @@ static void make_established(struct sock
+ tp->write_seq = snd_isn;
+ tp->snd_nxt = snd_isn;
+ tp->snd_una = snd_isn;
+- inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
++ inet_sk(sk)->inet_id = prandom_u32();
+ assign_rxopt(sk, opt);
+
+ if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -117,7 +117,7 @@ int dccp_v4_connect(struct sock *sk, str
+ inet->inet_daddr,
+ inet->inet_sport,
+ inet->inet_dport);
+- inet->inet_id = dp->dccps_iss ^ jiffies;
++ inet->inet_id = prandom_u32();
+
+ err = dccp_connect(sk);
+ rt = NULL;
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -73,7 +73,7 @@ int __ip4_datagram_connect(struct sock *
+ reuseport_has_conns(sk, true);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk_set_txhash(sk);
+- inet->inet_id = jiffies;
++ inet->inet_id = prandom_u32();
+
+ sk_dst_set(sk, &rt->dst);
+ err = 0;
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -303,7 +303,7 @@ int tcp_v4_connect(struct sock *sk, stru
+ inet->inet_daddr);
+ }
+
+- inet->inet_id = tp->write_seq ^ jiffies;
++ inet->inet_id = prandom_u32();
+
+ if (tcp_fastopen_defer_connect(sk, &err))
+ return err;
+@@ -1446,7 +1446,7 @@ struct sock *tcp_v4_syn_recv_sock(const
+ inet_csk(newsk)->icsk_ext_hdr_len = 0;
+ if (inet_opt)
+ inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
+- newinet->inet_id = newtp->write_seq ^ jiffies;
++ newinet->inet_id = prandom_u32();
+
+ if (!dst) {
+ dst = inet_csk_route_child_sock(sk, newsk, req);
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -9159,7 +9159,7 @@ void sctp_copy_sock(struct sock *newsk,
+ newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
+ newinet->inet_dport = htons(asoc->peer.port);
+ newinet->pmtudisc = inet->pmtudisc;
+- newinet->inet_id = asoc->next_tsn ^ jiffies;
++ newinet->inet_id = prandom_u32();
+
+ newinet->uc_ttl = inet->uc_ttl;
+ newinet->mc_loop = 1;
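One detail worth noting: inet_id is a 16-bit field, so the prandom_u32() value is implicitly truncated on assignment, which is all a random RFC 6864 starting point needs. A tiny sketch (helper name illustrative):

    #include <linux/random.h>
    #include <linux/types.h>

    static u16 pick_ip_id_start(void)
    {
        /* only the low 16 bits survive the assignment to inet_id */
        return (u16)prandom_u32();
    }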
diff --git a/patches.suse/ip6erspan-remove-the-incorrect-mtu-limit-for-ip6ersp.patch b/patches.suse/ip6erspan-remove-the-incorrect-mtu-limit-for-ip6ersp.patch
new file mode 100644
index 0000000000..83d26b0049
--- /dev/null
+++ b/patches.suse/ip6erspan-remove-the-incorrect-mtu-limit-for-ip6ersp.patch
@@ -0,0 +1,34 @@
+From: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
+Date: Tue, 8 Oct 2019 17:56:03 +0800
+Subject: ip6erspan: remove the incorrect mtu limit for ip6erspan
+Patch-mainline: v5.4-rc4
+Git-commit: 4123f637a5129470ff9d3cb00a5a4e213f2e15cc
+References: bsc#1154353
+
+The ip6erspan driver calls ether_setup(); after commit 61e84623ace3
+("net: centralize net_device min/max MTU checking"), the valid
+range of the mtu is [min_mtu, max_mtu], which is [68, 1500] by
+default.
+
+This prevents the dev mtu of the erspan device from exceeding
+1500, a limit which is not correct for an ip6erspan tap
+device.
+
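+For reference, the core range check this interacts with looks roughly
+like the sketch below (simplified, not the exact net/core/dev.c code;
+a max_mtu of 0 means "no upper bound"):
+
+    static int mtu_range_ok(unsigned int new_mtu,
+                            unsigned int min_mtu, unsigned int max_mtu)
+    {
+            if (new_mtu < min_mtu)
+                    return 0;       /* below lower bound */
+            if (max_mtu > 0 && new_mtu > max_mtu)
+                    return 0;       /* above upper bound */
+            return 1;               /* max_mtu == 0: unlimited */
+    }
+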
+Fixes: 61e84623ace3 ("net: centralize net_device min/max MTU checking")
+Signed-off-by: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
+Acked-by: William Tu <u9012063@gmail.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/ipv6/ip6_gre.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -2192,6 +2192,7 @@ static void ip6erspan_tap_setup(struct n
+ {
+ ether_setup(dev);
+
++ dev->max_mtu = 0;
+ dev->netdev_ops = &ip6erspan_netdev_ops;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ip6gre_dev_free;
diff --git a/patches.suse/ipv4-fix-IPSKB_FRAG_PMTU-handling-with-fragmentation.patch b/patches.suse/ipv4-fix-IPSKB_FRAG_PMTU-handling-with-fragmentation.patch
new file mode 100644
index 0000000000..2cc6e21029
--- /dev/null
+++ b/patches.suse/ipv4-fix-IPSKB_FRAG_PMTU-handling-with-fragmentation.patch
@@ -0,0 +1,106 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 19 Oct 2019 09:26:37 -0700
+Subject: ipv4: fix IPSKB_FRAG_PMTU handling with fragmentation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.4-rc6
+Git-commit: e7a409c3f46cb0dbc7bfd4f6f9421d53e92614a5
+References: bsc#1154353
+
+This patch removes the iph field from the state structure, which is not
+properly initialized. Instead, add a new field that carries the "do we
+want to set DF" bit in the state, and move the code that sets the DF
+flag into ip_frag_next().
+
+Joint work with Pablo and Linus.
+
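+With this change a caller looks roughly like the following sketch
+(error handling abbreviated; output() is a stand-in for the caller's
+transmit hook):
+
+    struct ip_frag_state state;
+    int err;
+
+    /* the DF decision is now an explicit input to the walk */
+    ip_frag_init(skb, hlen, ll_rs, mtu,
+                 IPCB(skb)->flags & IPSKB_FRAG_PMTU, &state);
+
+    while (state.left > 0) {
+            struct sk_buff *skb2 = ip_frag_next(skb, &state);
+
+            if (IS_ERR(skb2))
+                    return PTR_ERR(skb2);
+            err = output(net, sk, skb2);
+            if (err)
+                    return err;
+    }
+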
+Fixes: 19c3401a917b ("net: ipv4: place control buffer handling away from fragmentation iterators")
+Reported-by: Patrick Schönthaler <patrick@notvads.ovh>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/ip.h | 4 ++--
+ net/bridge/netfilter/nf_conntrack_bridge.c | 2 +-
+ net/ipv4/ip_output.c | 11 ++++++-----
+ 3 files changed, 9 insertions(+), 8 deletions(-)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -184,7 +184,7 @@ static inline struct sk_buff *ip_fraglis
+ }
+
+ struct ip_frag_state {
+- struct iphdr *iph;
++ bool DF;
+ unsigned int hlen;
+ unsigned int ll_rs;
+ unsigned int mtu;
+@@ -195,7 +195,7 @@ struct ip_frag_state {
+ };
+
+ void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
+- unsigned int mtu, struct ip_frag_state *state);
++ unsigned int mtu, bool DF, struct ip_frag_state *state);
+ struct sk_buff *ip_frag_next(struct sk_buff *skb,
+ struct ip_frag_state *state);
+
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -96,7 +96,7 @@ slow_path:
+ * This may also be a clone skbuff, we could preserve the geometry for
+ * the copies but probably not worth the effort.
+ */
+- ip_frag_init(skb, hlen, ll_rs, frag_max_size, &state);
++ ip_frag_init(skb, hlen, ll_rs, frag_max_size, false, &state);
+
+ while (state.left > 0) {
+ struct sk_buff *skb2;
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -645,11 +645,12 @@ void ip_fraglist_prepare(struct sk_buff
+ EXPORT_SYMBOL(ip_fraglist_prepare);
+
+ void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
+- unsigned int ll_rs, unsigned int mtu,
++ unsigned int ll_rs, unsigned int mtu, bool DF,
+ struct ip_frag_state *state)
+ {
+ struct iphdr *iph = ip_hdr(skb);
+
++ state->DF = DF;
+ state->hlen = hlen;
+ state->ll_rs = ll_rs;
+ state->mtu = mtu;
+@@ -668,9 +669,6 @@ static void ip_frag_ipcb(struct sk_buff
+ /* Copy the flags to each fragment. */
+ IPCB(to)->flags = IPCB(from)->flags;
+
+- if (IPCB(from)->flags & IPSKB_FRAG_PMTU)
+- state->iph->frag_off |= htons(IP_DF);
+-
+ /* ANK: dirty, but effective trick. Upgrade options only if
+ * the segment to be fragmented was THE FIRST (otherwise,
+ * options are already fixed) and make it ONCE
+@@ -738,6 +736,8 @@ struct sk_buff *ip_frag_next(struct sk_b
+ */
+ iph = ip_hdr(skb2);
+ iph->frag_off = htons((state->offset >> 3));
++ if (state->DF)
++ iph->frag_off |= htons(IP_DF);
+
+ /*
+ * Added AC : If we are fragmenting a fragment that's not the
+@@ -883,7 +883,8 @@ slow_path:
+ * Fragment the datagram.
+ */
+
+- ip_frag_init(skb, hlen, ll_rs, mtu, &state);
++ ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
++ &state);
+
+ /*
+ * Keep copying data until we run out.
diff --git a/patches.suse/ipv4-fix-route-update-on-metric-change.patch b/patches.suse/ipv4-fix-route-update-on-metric-change.patch
new file mode 100644
index 0000000000..c9f37f3dfd
--- /dev/null
+++ b/patches.suse/ipv4-fix-route-update-on-metric-change.patch
@@ -0,0 +1,63 @@
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Sat, 26 Oct 2019 11:53:39 +0200
+Subject: ipv4: fix route update on metric change.
+Patch-mainline: v5.4-rc6
+Git-commit: 0b834ba00ab5337e938c727e216e1f5249794717
+References: bsc#1154353
+
+Since commit af4d768ad28c ("net/ipv4: Add support for specifying metric
+of connected routes"), when updating an IP address with a different metric,
+the associated connected route is updated, too.
+
+Still, the mentioned commit doesn't properly handle some corner cases:
+
+$ ip addr add dev eth0 192.168.1.0/24
+$ ip addr add dev eth0 192.168.2.1/32 peer 192.168.2.2
+$ ip addr add dev eth0 192.168.3.1/24
+$ ip addr change dev eth0 192.168.1.0/24 metric 10
+$ ip addr change dev eth0 192.168.2.1/32 peer 192.168.2.2 metric 10
+$ ip addr change dev eth0 192.168.3.1/24 metric 10
+$ ip -4 route
+192.168.1.0/24 dev eth0 proto kernel scope link src 192.168.1.0
+192.168.2.2 dev eth0 proto kernel scope link src 192.168.2.1
+192.168.3.0/24 dev eth0 proto kernel scope link src 192.168.2.1 metric 10
+
+Only the last route is correctly updated.
+
+The problem is the current test in fib_modify_prefix_metric():
+
+ if (!(dev->flags & IFF_UP) ||
+ ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) ||
+ ipv4_is_zeronet(prefix) ||
+ prefix == ifa->ifa_local || ifa->ifa_prefixlen == 32)
+
+This should be the logical 'not' of the pre-existing test in
+fib_add_ifaddr():
+
+ if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
+ (prefix != addr || ifa->ifa_prefixlen < 32))
+
+To properly negate the original expression, we need to change the last
+logical 'or' to a logical 'and'.
+
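+Spelled out with De Morgan's laws, writing A for the !zeronet/!secondary
+part, B for "prefix != addr" and C for "prefixlen < 32": the add-side
+condition is
+
+    A && (B || C)
+
+so its proper negation is
+
+    !A || (!B && !C)  ==  !A || (prefix == addr && prefixlen == 32)
+
+whereas the buggy code effectively tested !A || !B || !C.
+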
+Fixes: af4d768ad28c ("net/ipv4: Add support for specifying metric of connected routes")
+Reported-and-suggested-by: Beniamino Galvani <bgalvani@redhat.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/ipv4/fib_frontend.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1147,7 +1147,7 @@ void fib_modify_prefix_metric(struct in_
+ if (!(dev->flags & IFF_UP) ||
+ ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) ||
+ ipv4_is_zeronet(prefix) ||
+- prefix == ifa->ifa_local || ifa->ifa_prefixlen == 32)
++ (prefix == ifa->ifa_local && ifa->ifa_prefixlen == 32))
+ return;
+
+ /* add the new */
diff --git a/patches.suse/ipv6-include-net-addrconf.h-for-missing-declarations.patch b/patches.suse/ipv6-include-net-addrconf.h-for-missing-declarations.patch
new file mode 100644
index 0000000000..163fb14268
--- /dev/null
+++ b/patches.suse/ipv6-include-net-addrconf.h-for-missing-declarations.patch
@@ -0,0 +1,35 @@
+From: "Ben Dooks (Codethink)" <ben.dooks@codethink.co.uk>
+Date: Tue, 22 Oct 2019 15:44:40 +0100
+Subject: ipv6: include <net/addrconf.h> for missing declarations
+Patch-mainline: v5.4-rc6
+Git-commit: 6c5d9c2a6bedbb3c3c14253776320c0ee564f064
+References: bsc#1154353
+
+Include <net/addrconf.h> for the missing declarations of
+various functions. Fixes the following sparse warnings:
+
+net/ipv6/addrconf_core.c:94:5: warning: symbol 'register_inet6addr_notifier' was not declared. Should it be static?
+net/ipv6/addrconf_core.c:100:5: warning: symbol 'unregister_inet6addr_notifier' was not declared. Should it be static?
+net/ipv6/addrconf_core.c:106:5: warning: symbol 'inet6addr_notifier_call_chain' was not declared. Should it be static?
+net/ipv6/addrconf_core.c:112:5: warning: symbol 'register_inet6addr_validator_notifier' was not declared. Should it be static?
+net/ipv6/addrconf_core.c:118:5: warning: symbol 'unregister_inet6addr_validator_notifier' was not declared. Should it be static?
+net/ipv6/addrconf_core.c:125:5: warning: symbol 'inet6addr_validator_notifier_call_chain' was not declared. Should it be static?
+net/ipv6/addrconf_core.c:237:6: warning: symbol 'in6_dev_finish_destroy' was not declared. Should it be static?
+
+Signed-off-by: Ben Dooks (Codethink) <ben.dooks@codethink.co.uk>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/ipv6/addrconf_core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv6/addrconf_core.c
++++ b/net/ipv6/addrconf_core.c
+@@ -7,6 +7,7 @@
+ #include <linux/export.h>
+ #include <net/ipv6.h>
+ #include <net/ipv6_stubs.h>
++#include <net/addrconf.h>
+ #include <net/ip.h>
+
+ /* if ipv6 module registers this function is used by xfrm to force all
diff --git a/patches.suse/ipvs-don-t-ignore-errors-in-case-refcounting-ip_vs-m.patch b/patches.suse/ipvs-don-t-ignore-errors-in-case-refcounting-ip_vs-m.patch
new file mode 100644
index 0000000000..45e67f901b
--- /dev/null
+++ b/patches.suse/ipvs-don-t-ignore-errors-in-case-refcounting-ip_vs-m.patch
@@ -0,0 +1,223 @@
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Sat, 19 Oct 2019 17:34:35 +0200
+Subject: ipvs: don't ignore errors in case refcounting ip_vs module fails
+Patch-mainline: v5.4-rc6
+Git-commit: 62931f59ce9cbabb934a431f48f2f1f441c605ac
+References: bsc#1154353
+
+If the IPVS module is removed while the sync daemon is starting, there is
+a small window where try_module_get() can fail to take the refcount inside
+ip_vs_use_count_inc(). The refcounts of the IPVS module then become
+unbalanced, and the subsequent call to stop_sync_thread() causes the
+following splat:
+
+ WARNING: CPU: 0 PID: 4013 at kernel/module.c:1146 module_put.part.44+0x15b/0x290
+ Modules linked in: ip_vs(-) nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 veth ip6table_filter ip6_tables iptable_filter binfmt_misc intel_rapl_msr intel_rapl_common crct10dif_pclmul crc32_pclmul ext4 mbcache jbd2 ghash_clmulni_intel snd_hda_codec_generic ledtrig_audio snd_hda_intel snd_intel_nhlt snd_hda_codec snd_hda_core snd_hwdep snd_seq snd_seq_device snd_pcm aesni_intel crypto_simd cryptd glue_helper joydev pcspkr snd_timer virtio_balloon snd soundcore i2c_piix4 nfsd auth_rpcgss nfs_acl lockd grace sunrpc ip_tables xfs libcrc32c ata_generic pata_acpi virtio_net net_failover virtio_blk failover virtio_console qxl drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ata_piix ttm crc32c_intel serio_raw drm virtio_pci libata virtio_ring virtio floppy dm_mirror dm_region_hash dm_log dm_mod [last unloaded: nf_defrag_ipv6]
+ CPU: 0 PID: 4013 Comm: modprobe Tainted: G W 5.4.0-rc1.upstream+ #741
+ Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
+ RIP: 0010:module_put.part.44+0x15b/0x290
+ Code: 04 25 28 00 00 00 0f 85 18 01 00 00 48 83 c4 68 5b 5d 41 5c 41 5d 41 5e 41 5f c3 89 44 24 28 83 e8 01 89 c5 0f 89 57 ff ff ff <0f> 0b e9 78 ff ff ff 65 8b 1d 67 83 26 4a 89 db be 08 00 00 00 48
+ RSP: 0018:ffff888050607c78 EFLAGS: 00010297
+ RAX: 0000000000000003 RBX: ffffffffc1420590 RCX: ffffffffb5db0ef9
+ RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffffffffc1420590
+ RBP: 00000000ffffffff R08: fffffbfff82840b3 R09: fffffbfff82840b3
+ R10: 0000000000000001 R11: fffffbfff82840b2 R12: 1ffff1100a0c0f90
+ R13: ffffffffc1420200 R14: ffff88804f533300 R15: ffff88804f533ca0
+ FS: 00007f8ea9720740(0000) GS:ffff888053800000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007f3245abe000 CR3: 000000004c28a006 CR4: 00000000001606f0
+ Call Trace:
+ stop_sync_thread+0x3a3/0x7c0 [ip_vs]
+ ip_vs_sync_net_cleanup+0x13/0x50 [ip_vs]
+ ops_exit_list.isra.5+0x94/0x140
+ unregister_pernet_operations+0x29d/0x460
+ unregister_pernet_device+0x26/0x60
+ ip_vs_cleanup+0x11/0x38 [ip_vs]
+ __x64_sys_delete_module+0x2d5/0x400
+ do_syscall_64+0xa5/0x4e0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+ RIP: 0033:0x7f8ea8bf0db7
+ Code: 73 01 c3 48 8b 0d b9 80 2c 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 b8 b0 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 89 80 2c 00 f7 d8 64 89 01 48
+ RSP: 002b:00007ffcd38d2fe8 EFLAGS: 00000206 ORIG_RAX: 00000000000000b0
+ RAX: ffffffffffffffda RBX: 0000000002436240 RCX: 00007f8ea8bf0db7
+ RDX: 0000000000000000 RSI: 0000000000000800 RDI: 00000000024362a8
+ RBP: 0000000000000000 R08: 00007f8ea8eba060 R09: 00007f8ea8c658a0
+ R10: 00007ffcd38d2a60 R11: 0000000000000206 R12: 0000000000000000
+ R13: 0000000000000001 R14: 00000000024362a8 R15: 0000000000000000
+ irq event stamp: 4538
+ hardirqs last enabled at (4537): [<ffffffffb6193dde>] quarantine_put+0x9e/0x170
+ hardirqs last disabled at (4538): [<ffffffffb5a0556a>] trace_hardirqs_off_thunk+0x1a/0x20
+ softirqs last enabled at (4522): [<ffffffffb6f8ebe9>] sk_common_release+0x169/0x2d0
+ softirqs last disabled at (4520): [<ffffffffb6f8eb3e>] sk_common_release+0xbe/0x2d0
+
+Check the return value of ip_vs_use_count_inc() and let its callers return
+a proper error. Inside do_ip_vs_set_ctl() the module is already refcounted,
+so we don't need to refcount/derefcount there. Finally, in register_ip_vs_app()
+and start_sync_thread(), take the module refcount earlier and ensure it's
+released in the error path.
+
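+The corrected pattern, common to all the touched call sites, is roughly
+(sketch; do_setup() is a stand-in for the per-site work):
+
+    /* take the reference first; fail fast if the module is going away */
+    if (!ip_vs_use_count_inc())
+            return -ENOPROTOOPT;
+
+    result = do_setup();
+    if (result < 0)
+            ip_vs_use_count_dec();  /* drop the ref on every error path */
+    return result;
+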
+Changes since v1:
+ - better return values in case of failure of ip_vs_use_count_inc(),
+ thanks to Julian Anastasov
+ - no need to increase/decrease the module refcount in ip_vs_set_ctl(),
+ thanks to Julian Anastasov
+
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: Simon Horman <horms@verge.net.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/netfilter/ipvs/ip_vs_app.c | 12 ++++++++++--
+ net/netfilter/ipvs/ip_vs_ctl.c | 14 ++++----------
+ net/netfilter/ipvs/ip_vs_pe.c | 3 ++-
+ net/netfilter/ipvs/ip_vs_sched.c | 3 ++-
+ net/netfilter/ipvs/ip_vs_sync.c | 13 ++++++++++---
+ 5 files changed, 28 insertions(+), 17 deletions(-)
+
+--- a/net/netfilter/ipvs/ip_vs_app.c
++++ b/net/netfilter/ipvs/ip_vs_app.c
+@@ -193,21 +193,29 @@ struct ip_vs_app *register_ip_vs_app(str
+
+ mutex_lock(&__ip_vs_app_mutex);
+
++ /* increase the module use count */
++ if (!ip_vs_use_count_inc()) {
++ err = -ENOENT;
++ goto out_unlock;
++ }
++
+ list_for_each_entry(a, &ipvs->app_list, a_list) {
+ if (!strcmp(app->name, a->name)) {
+ err = -EEXIST;
++ /* decrease the module use count */
++ ip_vs_use_count_dec();
+ goto out_unlock;
+ }
+ }
+ a = kmemdup(app, sizeof(*app), GFP_KERNEL);
+ if (!a) {
+ err = -ENOMEM;
++ /* decrease the module use count */
++ ip_vs_use_count_dec();
+ goto out_unlock;
+ }
+ INIT_LIST_HEAD(&a->incs_list);
+ list_add(&a->a_list, &ipvs->app_list);
+- /* increase the module use count */
+- ip_vs_use_count_inc();
+
+ out_unlock:
+ mutex_unlock(&__ip_vs_app_mutex);
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1275,7 +1275,8 @@ ip_vs_add_service(struct netns_ipvs *ipv
+ struct ip_vs_service *svc = NULL;
+
+ /* increase the module use count */
+- ip_vs_use_count_inc();
++ if (!ip_vs_use_count_inc())
++ return -ENOPROTOOPT;
+
+ /* Lookup the scheduler by 'u->sched_name' */
+ if (strcmp(u->sched_name, "none")) {
+@@ -2434,9 +2435,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
+ if (copy_from_user(arg, user, len) != 0)
+ return -EFAULT;
+
+- /* increase the module use count */
+- ip_vs_use_count_inc();
+-
+ /* Handle daemons since they have another lock */
+ if (cmd == IP_VS_SO_SET_STARTDAEMON ||
+ cmd == IP_VS_SO_SET_STOPDAEMON) {
+@@ -2449,13 +2447,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
+ ret = -EINVAL;
+ if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
+ sizeof(cfg.mcast_ifn)) <= 0)
+- goto out_dec;
++ return ret;
+ cfg.syncid = dm->syncid;
+ ret = start_sync_thread(ipvs, &cfg, dm->state);
+ } else {
+ ret = stop_sync_thread(ipvs, dm->state);
+ }
+- goto out_dec;
++ return ret;
+ }
+
+ mutex_lock(&__ip_vs_mutex);
+@@ -2550,10 +2548,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
+
+ out_unlock:
+ mutex_unlock(&__ip_vs_mutex);
+- out_dec:
+- /* decrease the module use count */
+- ip_vs_use_count_dec();
+-
+ return ret;
+ }
+
+--- a/net/netfilter/ipvs/ip_vs_pe.c
++++ b/net/netfilter/ipvs/ip_vs_pe.c
+@@ -68,7 +68,8 @@ int register_ip_vs_pe(struct ip_vs_pe *p
+ struct ip_vs_pe *tmp;
+
+ /* increase the module use count */
+- ip_vs_use_count_inc();
++ if (!ip_vs_use_count_inc())
++ return -ENOENT;
+
+ mutex_lock(&ip_vs_pe_mutex);
+ /* Make sure that the pe with this name doesn't exist
+--- a/net/netfilter/ipvs/ip_vs_sched.c
++++ b/net/netfilter/ipvs/ip_vs_sched.c
+@@ -179,7 +179,8 @@ int register_ip_vs_scheduler(struct ip_v
+ }
+
+ /* increase the module use count */
+- ip_vs_use_count_inc();
++ if (!ip_vs_use_count_inc())
++ return -ENOENT;
+
+ mutex_lock(&ip_vs_sched_mutex);
+
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -1762,6 +1762,10 @@ int start_sync_thread(struct netns_ipvs
+ IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
+ sizeof(struct ip_vs_sync_conn_v0));
+
++ /* increase the module use count */
++ if (!ip_vs_use_count_inc())
++ return -ENOPROTOOPT;
++
+ /* Do not hold one mutex and then to block on another */
+ for (;;) {
+ rtnl_lock();
+@@ -1892,9 +1896,6 @@ int start_sync_thread(struct netns_ipvs
+ mutex_unlock(&ipvs->sync_mutex);
+ rtnl_unlock();
+
+- /* increase the module use count */
+- ip_vs_use_count_inc();
+-
+ return 0;
+
+ out:
+@@ -1924,11 +1925,17 @@ out:
+ }
+ kfree(ti);
+ }
++
++ /* decrease the module use count */
++ ip_vs_use_count_dec();
+ return result;
+
+ out_early:
+ mutex_unlock(&ipvs->sync_mutex);
+ rtnl_unlock();
++
++ /* decrease the module use count */
++ ip_vs_use_count_dec();
+ return result;
+ }
+
diff --git a/patches.suse/ipvs-move-old_secure_tcp-into-struct-netns_ipvs.patch b/patches.suse/ipvs-move-old_secure_tcp-into-struct-netns_ipvs.patch
new file mode 100644
index 0000000000..04a48ebcc8
--- /dev/null
+++ b/patches.suse/ipvs-move-old_secure_tcp-into-struct-netns_ipvs.patch
@@ -0,0 +1,108 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 23 Oct 2019 09:53:03 -0700
+Subject: ipvs: move old_secure_tcp into struct netns_ipvs
+Patch-mainline: v5.4-rc6
+Git-commit: c24b75e0f9239e78105f81c5f03a751641eb07ef
+References: bsc#1154353
+
+syzbot reported the following issue :
+
+BUG: KCSAN: data-race in update_defense_level / update_defense_level
+
+read to 0xffffffff861a6260 of 4 bytes by task 3006 on cpu 1:
+ update_defense_level+0x621/0xb30 net/netfilter/ipvs/ip_vs_ctl.c:177
+ defense_work_handler+0x3d/0xd0 net/netfilter/ipvs/ip_vs_ctl.c:225
+ process_one_work+0x3d4/0x890 kernel/workqueue.c:2269
+ worker_thread+0xa0/0x800 kernel/workqueue.c:2415
+ kthread+0x1d4/0x200 drivers/block/aoe/aoecmd.c:1253
+ ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:352
+
+write to 0xffffffff861a6260 of 4 bytes by task 7333 on cpu 0:
+ update_defense_level+0xa62/0xb30 net/netfilter/ipvs/ip_vs_ctl.c:205
+ defense_work_handler+0x3d/0xd0 net/netfilter/ipvs/ip_vs_ctl.c:225
+ process_one_work+0x3d4/0x890 kernel/workqueue.c:2269
+ worker_thread+0xa0/0x800 kernel/workqueue.c:2415
+ kthread+0x1d4/0x200 drivers/block/aoe/aoecmd.c:1253
+ ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:352
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 7333 Comm: kworker/0:5 Not tainted 5.4.0-rc3+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Workqueue: events defense_work_handler
+
+Indeed, old_secure_tcp is currently a static variable, while it
+needs to be a per netns variable.
+
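+The shape of the fix, in miniature:
+
+    /* before: one copy for the whole kernel, shared by all netns and
+     * by concurrent defense_work_handler() invocations
+     */
+    static int old_secure_tcp;
+
+    /* after: one copy per namespace, next to the state it mirrors */
+    ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
+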
+Fixes: a0840e2e165a ("IPVS: netns, ip_vs_ctl local vars moved to ipvs struct.")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Simon Horman <horms@verge.net.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/ip_vs.h | 1 +
+ net/netfilter/ipvs/ip_vs_ctl.c | 15 +++++++--------
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -889,6 +889,7 @@ struct netns_ipvs {
+ struct delayed_work defense_work; /* Work handler */
+ int drop_rate;
+ int drop_counter;
++ int old_secure_tcp;
+ atomic_t dropentry;
+ /* locks in ctl.c */
+ spinlock_t dropentry_lock; /* drop entry handling */
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -93,7 +93,6 @@ static bool __ip_vs_addr_is_local_v6(str
+ static void update_defense_level(struct netns_ipvs *ipvs)
+ {
+ struct sysinfo i;
+- static int old_secure_tcp = 0;
+ int availmem;
+ int nomem;
+ int to_change = -1;
+@@ -174,35 +173,35 @@ static void update_defense_level(struct
+ spin_lock(&ipvs->securetcp_lock);
+ switch (ipvs->sysctl_secure_tcp) {
+ case 0:
+- if (old_secure_tcp >= 2)
++ if (ipvs->old_secure_tcp >= 2)
+ to_change = 0;
+ break;
+ case 1:
+ if (nomem) {
+- if (old_secure_tcp < 2)
++ if (ipvs->old_secure_tcp < 2)
+ to_change = 1;
+ ipvs->sysctl_secure_tcp = 2;
+ } else {
+- if (old_secure_tcp >= 2)
++ if (ipvs->old_secure_tcp >= 2)
+ to_change = 0;
+ }
+ break;
+ case 2:
+ if (nomem) {
+- if (old_secure_tcp < 2)
++ if (ipvs->old_secure_tcp < 2)
+ to_change = 1;
+ } else {
+- if (old_secure_tcp >= 2)
++ if (ipvs->old_secure_tcp >= 2)
+ to_change = 0;
+ ipvs->sysctl_secure_tcp = 1;
+ }
+ break;
+ case 3:
+- if (old_secure_tcp < 2)
++ if (ipvs->old_secure_tcp < 2)
+ to_change = 1;
+ break;
+ }
+- old_secure_tcp = ipvs->sysctl_secure_tcp;
++ ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
+ if (to_change >= 0)
+ ip_vs_protocol_timeout_change(ipvs,
+ ipvs->sysctl_secure_tcp > 1);
diff --git a/patches.suse/iw_cxgb4-fix-ECN-check-on-the-passive-accept.patch b/patches.suse/iw_cxgb4-fix-ECN-check-on-the-passive-accept.patch
new file mode 100644
index 0000000000..4c4829e508
--- /dev/null
+++ b/patches.suse/iw_cxgb4-fix-ECN-check-on-the-passive-accept.patch
@@ -0,0 +1,66 @@
+From: Potnuri Bharat Teja <bharat@chelsio.com>
+Date: Thu, 3 Oct 2019 16:13:53 +0530
+Subject: iw_cxgb4: fix ECN check on the passive accept
+Patch-mainline: v5.4-rc6
+Git-commit: 612e0486ad0845c41ac10492e78144f99e326375
+References: jsc#SLE-8392
+
+pass_accept_req() uses the same skb for handling the accept request and
+for sending the accept reply to HW. Here the req and rpl structures point
+to the same skb->data, which is overwritten by INIT_TP_WR() and leads to
+accessing corrupt req fields in accept_cr() while checking for ECN flags.
+Reorder the code in accept_cr() to fetch the req fields first.
+
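+The hazard, in miniature (both pointers alias the same skb->data):
+
+    req = cplhdr(skb);          /* request view of skb->data      */
+    rpl = cplhdr(skb);          /* reply view -- same storage!    */
+    INIT_TP_WR(rpl, ep->hwtid); /* clobbers the bytes *req covers */
+    ecn = req->tcpopt.ecn;      /* now reads corrupted data       */
+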
+Fixes: 92e7ae7172 ("iw_cxgb4: Choose appropriate hw mtu index and ISS for iWARP connections")
+Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Link: https://lore.kernel.org/r/20191003104353.11590-1-bharat@chelsio.com
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/cm.c | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -2424,20 +2424,6 @@ static int accept_cr(struct c4iw_ep *ep,
+ enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
+
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
+-
+- skb_get(skb);
+- rpl = cplhdr(skb);
+- if (!is_t4(adapter_type)) {
+- skb_trim(skb, roundup(sizeof(*rpl5), 16));
+- rpl5 = (void *)rpl;
+- INIT_TP_WR(rpl5, ep->hwtid);
+- } else {
+- skb_trim(skb, sizeof(*rpl));
+- INIT_TP_WR(rpl, ep->hwtid);
+- }
+- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+- ep->hwtid));
+-
+ cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps && req->tcpopt.tstamp,
+ (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+@@ -2483,6 +2469,20 @@ static int accept_cr(struct c4iw_ep *ep,
+ if (tcph->ece && tcph->cwr)
+ opt2 |= CCTRL_ECN_V(1);
+ }
++
++ skb_get(skb);
++ rpl = cplhdr(skb);
++ if (!is_t4(adapter_type)) {
++ skb_trim(skb, roundup(sizeof(*rpl5), 16));
++ rpl5 = (void *)rpl;
++ INIT_TP_WR(rpl5, ep->hwtid);
++ } else {
++ skb_trim(skb, sizeof(*rpl));
++ INIT_TP_WR(rpl, ep->hwtid);
++ }
++ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
++ ep->hwtid));
++
+ if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
+ u32 isn = (prandom_u32() & ~7UL) - 1;
+ opt2 |= T5_OPT_2_VALID_F;
diff --git a/patches.suse/ixgbe-Remove-duplicate-clear_bit-call.patch b/patches.suse/ixgbe-Remove-duplicate-clear_bit-call.patch
new file mode 100644
index 0000000000..218c79a454
--- /dev/null
+++ b/patches.suse/ixgbe-Remove-duplicate-clear_bit-call.patch
@@ -0,0 +1,27 @@
+From: Igor Pylypiv <igor.pylypiv@gmail.com>
+Date: Thu, 3 Oct 2019 23:53:57 -0700
+Subject: ixgbe: Remove duplicate clear_bit() call
+Patch-mainline: v5.4-rc6
+Git-commit: 451fe015b2857de3d8027ef606284a205e177724
+References: jsc#SLE-7979 jsc#SLE-7981
+
+The __IXGBE_RX_BUILD_SKB_ENABLED bit is already cleared at this point,
+so this second clear_bit() call is a duplicate and can be removed.
+
+Signed-off-by: Igor Pylypiv <igor.pylypiv@gmail.com>
+Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -4310,7 +4310,6 @@ static void ixgbe_set_rx_buffer_len(stru
+ if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
+ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+- clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+ if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ continue;
+
diff --git a/patches.suse/keys-Fix-memory-leak-in-copy_net_ns.patch b/patches.suse/keys-Fix-memory-leak-in-copy_net_ns.patch
new file mode 100644
index 0000000000..e23adc0a48
--- /dev/null
+++ b/patches.suse/keys-Fix-memory-leak-in-copy_net_ns.patch
@@ -0,0 +1,57 @@
+From: Takeshi Misawa <jeliantsurux@gmail.com>
+Date: Sat, 19 Oct 2019 15:34:43 +0900
+Subject: keys: Fix memory leak in copy_net_ns
+Patch-mainline: v5.4-rc6
+Git-commit: 82ecff655e7968151b0047f1b5de03b249e5c1c4
+References: bsc#1154353
+
+If copy_net_ns() fails after net_alloc(), net->key_domain is leaked.
+Fix this by freeing key_domain in the error path.
+
+syzbot report:
+BUG: memory leak
+unreferenced object 0xffff8881175007e0 (size 32):
+ comm "syz-executor902", pid 7069, jiffies 4294944350 (age 28.400s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<00000000a83ed741>] kmemleak_alloc_recursive include/linux/kmemleak.h:43 [inline]
+ [<00000000a83ed741>] slab_post_alloc_hook mm/slab.h:439 [inline]
+ [<00000000a83ed741>] slab_alloc mm/slab.c:3326 [inline]
+ [<00000000a83ed741>] kmem_cache_alloc_trace+0x13d/0x280 mm/slab.c:3553
+ [<0000000059fc92b9>] kmalloc include/linux/slab.h:547 [inline]
+ [<0000000059fc92b9>] kzalloc include/linux/slab.h:742 [inline]
+ [<0000000059fc92b9>] net_alloc net/core/net_namespace.c:398 [inline]
+ [<0000000059fc92b9>] copy_net_ns+0xb2/0x220 net/core/net_namespace.c:445
+ [<00000000a9d74bbc>] create_new_namespaces+0x141/0x2a0 kernel/nsproxy.c:103
+ [<000000008047d645>] unshare_nsproxy_namespaces+0x7f/0x100 kernel/nsproxy.c:202
+ [<000000005993ea6e>] ksys_unshare+0x236/0x490 kernel/fork.c:2674
+ [<0000000019417e75>] __do_sys_unshare kernel/fork.c:2742 [inline]
+ [<0000000019417e75>] __se_sys_unshare kernel/fork.c:2740 [inline]
+ [<0000000019417e75>] __x64_sys_unshare+0x16/0x20 kernel/fork.c:2740
+ [<00000000f4c5f2c8>] do_syscall_64+0x76/0x1a0 arch/x86/entry/common.c:296
+ [<0000000038550184>] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+syzbot also reported another leak in copy_net_ns() -> setup_net().
+That problem is already fixed by commit cf47a0b882a4e5f6b34c7949d7b293e9287f1972.
+
+Fixes: 9b242610514f ("keys: Network namespace domain tag")
+Reported-and-tested-by: syzbot+3b3296d032353c33184b@syzkaller.appspotmail.com
+Signed-off-by: Takeshi Misawa <jeliantsurux@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/core/net_namespace.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -479,6 +479,7 @@ struct net *copy_net_ns(unsigned long fl
+
+ if (rv < 0) {
+ put_userns:
++ key_remove_domain(net->key_domain);
+ put_user_ns(user_ns);
+ net_drop_ns(net);
+ dec_ucounts:
diff --git a/patches.suse/mlxsw-core-Unpublish-devlink-parameters-during-reloa.patch b/patches.suse/mlxsw-core-Unpublish-devlink-parameters-during-reloa.patch
new file mode 100644
index 0000000000..027254e057
--- /dev/null
+++ b/patches.suse/mlxsw-core-Unpublish-devlink-parameters-during-reloa.patch
@@ -0,0 +1,51 @@
+From: Jiri Pirko <jiri@mellanox.com>
+Date: Wed, 30 Oct 2019 11:04:22 +0200
+Subject: mlxsw: core: Unpublish devlink parameters during reload
+Patch-mainline: v5.4-rc6
+Git-commit: b7265a0df82c1716bf788096217083ed65a8bb14
+References: bsc#1154488
+
+The devlink parameter "acl_region_rehash_interval" is a runtime
+parameter whose value is stored in dynamically allocated memory. While
+reloading the driver, this memory is freed and then allocated again. A
+use-after-free might happen if someone tries to retrieve the value
+during this time frame.
+
+Since commit 070c63f20f6c ("net: devlink: allow to change namespaces
+during reload") the use-after-free can be reliably triggered when
+reloading the driver into a namespace, as all the parameters are
+notified after the memory is freed (via the reload_down() callback).
+
+Fix this by unpublishing and then re-publishing the parameters during
+reload.
+
+Fixes: 98bbf70c1c41 ("mlxsw: spectrum: add "acl_region_rehash_interval" devlink param")
+Fixes: 7c62cfb8c574 ("devlink: publish params only after driver init is done")
+Signed-off-by: Jiri Pirko <jiri@mellanox.com>
+Signed-off-by: Ido Schimmel <idosch@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlxsw/core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -1186,7 +1186,7 @@ __mlxsw_core_bus_device_register(const s
+ if (err)
+ goto err_thermal_init;
+
+- if (mlxsw_driver->params_register && !reload)
++ if (mlxsw_driver->params_register)
+ devlink_params_publish(devlink);
+
+ return 0;
+@@ -1259,7 +1259,7 @@ void mlxsw_core_bus_device_unregister(st
+ return;
+ }
+
+- if (mlxsw_core->driver->params_unregister && !reload)
++ if (mlxsw_core->driver->params_unregister)
+ devlink_params_unpublish(devlink);
+ mlxsw_thermal_fini(mlxsw_core->thermal);
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
diff --git a/patches.suse/mlxsw-spectrum_trap-Push-Ethernet-header-before-repo.patch b/patches.suse/mlxsw-spectrum_trap-Push-Ethernet-header-before-repo.patch
new file mode 100644
index 0000000000..6f3851cb7a
--- /dev/null
+++ b/patches.suse/mlxsw-spectrum_trap-Push-Ethernet-header-before-repo.patch
@@ -0,0 +1,35 @@
+From: Ido Schimmel <idosch@mellanox.com>
+Date: Thu, 17 Oct 2019 10:11:03 +0300
+Subject: mlxsw: spectrum_trap: Push Ethernet header before reporting trap
+Patch-mainline: v5.4-rc4
+Git-commit: 2e978795bb4c14293bf6ecf32621d32529706aef
+References: bsc#1154488
+
+devlink maintains packet and byte statistics for each trap. Since
+eth_type_trans() was called to set the skb's protocol, the data pointer
+no longer points to the start of the packet, and the byte accounting is
+off by 14 bytes (the length of the Ethernet header).
+
+Fix this by pushing the skb's data pointer to the start of the packet.
+
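+In other words, the receive path now looks like this (sketch):
+
+    skb->protocol = eth_type_trans(skb, netdev); /* pulls ETH_HLEN  */
+    skb_push(skb, ETH_HLEN);    /* undo the pull, so skb->len spans */
+                                /* the whole frame again            */
+    devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
+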
+Fixes: b5ce611fd96e ("mlxsw: spectrum: Add devlink-trap support")
+Reported-by: Alex Kushnarov <alexanderk@mellanox.com>
+Tested-by: Alex Kushnarov <alexanderk@mellanox.com>
+Acked-by: Jiri Pirko <jiri@mellanox.com>
+Signed-off-by: Ido Schimmel <idosch@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+@@ -99,6 +99,7 @@ static void mlxsw_sp_rx_drop_listener(st
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
++ skb_push(skb, ETH_HLEN);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
+ consume_skb(skb);
+ }
diff --git a/patches.suse/net-Zeroing-the-structure-ethtool_wolinfo-in-ethtool.patch b/patches.suse/net-Zeroing-the-structure-ethtool_wolinfo-in-ethtool.patch
new file mode 100644
index 0000000000..0a7021f81e
--- /dev/null
+++ b/patches.suse/net-Zeroing-the-structure-ethtool_wolinfo-in-ethtool.patch
@@ -0,0 +1,34 @@
+From: zhanglin <zhang.lin16@zte.com.cn>
+Date: Sat, 26 Oct 2019 15:54:16 +0800
+Subject: net: Zeroing the structure ethtool_wolinfo in ethtool_get_wol()
+Patch-mainline: v5.4-rc6
+Git-commit: 5ff223e86f5addbfae26419cbb5d61d98f6fbf7d
+References: bsc#1154353
+
+memset() the structure ethtool_wolinfo: it has padding bytes,
+and those padding bytes were not being zeroed out.
+
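+Why a partial initializer is not enough here (illustrative userspace
+sketch; the layout is hypothetical):
+
+    #include <string.h>
+
+    struct wolinfo_like {
+            char cmd;       /* followed by 3 padding bytes, which a   */
+            int supported;  /* '= { .cmd = 1 }' initializer may leave */
+    };                      /* indeterminate                          */
+
+    void fill(struct wolinfo_like *out)
+    {
+            memset(out, 0, sizeof(*out)); /* zeroes the padding too */
+            out->cmd = 1;
+    }
+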
+Signed-off-by: zhanglin <zhang.lin16@zte.com.cn>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/core/ethtool.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1396,11 +1396,13 @@ static int ethtool_reset(struct net_devi
+
+ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
+ {
+- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
++ struct ethtool_wolinfo wol;
+
+ if (!dev->ethtool_ops->get_wol)
+ return -EOPNOTSUPP;
+
++ memset(&wol, 0, sizeof(struct ethtool_wolinfo));
++ wol.cmd = ETHTOOL_GWOL;
+ dev->ethtool_ops->get_wol(dev, &wol);
+
+ if (copy_to_user(useraddr, &wol, sizeof(wol)))
diff --git a/patches.suse/net-add-READ-WRITE-_ONCE-annotations-on-rskq_accept_.patch b/patches.suse/net-add-READ-WRITE-_ONCE-annotations-on-rskq_accept_.patch
new file mode 100644
index 0000000000..f93acc5371
--- /dev/null
+++ b/patches.suse/net-add-READ-WRITE-_ONCE-annotations-on-rskq_accept_.patch
@@ -0,0 +1,65 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 9 Oct 2019 14:51:20 -0700
+Subject: net: add {READ|WRITE}_ONCE() annotations on ->rskq_accept_head
+Patch-mainline: v5.4-rc4
+Git-commit: 60b173ca3d1cd1782bd0096dc17298ec242f6fb1
+References: bsc#1154353
+
+reqsk_queue_empty() is called from inet_csk_listen_poll() while
+other cpus might write the ->rskq_accept_head value.
+
+Use {READ|WRITE}_ONCE() to avoid compiler tricks
+and potential KCSAN splats.
+
+Fixes: fff1f3001cc5 ("tcp: add a spinlock to protect struct request_sock_queue")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/xen/pvcalls-back.c | 2 +-
+ include/net/request_sock.h | 4 ++--
+ net/ipv4/inet_connection_sock.c | 2 +-
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/xen/pvcalls-back.c
++++ b/drivers/xen/pvcalls-back.c
+@@ -775,7 +775,7 @@ static int pvcalls_back_poll(struct xenb
+ mappass->reqcopy = *req;
+ icsk = inet_csk(mappass->sock->sk);
+ queue = &icsk->icsk_accept_queue;
+- data = queue->rskq_accept_head != NULL;
++ data = READ_ONCE(queue->rskq_accept_head) != NULL;
+ if (data) {
+ mappass->reqcopy.cmd = 0;
+ ret = 0;
+--- a/include/net/request_sock.h
++++ b/include/net/request_sock.h
+@@ -185,7 +185,7 @@ void reqsk_fastopen_remove(struct sock *
+
+ static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
+ {
+- return queue->rskq_accept_head == NULL;
++ return READ_ONCE(queue->rskq_accept_head) == NULL;
+ }
+
+ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
+@@ -197,7 +197,7 @@ static inline struct request_sock *reqsk
+ req = queue->rskq_accept_head;
+ if (req) {
+ sk_acceptq_removed(parent);
+- queue->rskq_accept_head = req->dl_next;
++ WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
+ if (queue->rskq_accept_head == NULL)
+ queue->rskq_accept_tail = NULL;
+ }
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -934,7 +934,7 @@ struct sock *inet_csk_reqsk_queue_add(st
+ req->sk = child;
+ req->dl_next = NULL;
+ if (queue->rskq_accept_head == NULL)
+- queue->rskq_accept_head = req;
++ WRITE_ONCE(queue->rskq_accept_head, req);
+ else
+ queue->rskq_accept_tail->dl_next = req;
+ queue->rskq_accept_tail = req;
diff --git a/patches.suse/net-add-READ_ONCE-annotation-in-__skb_wait_for_more_.patch b/patches.suse/net-add-READ_ONCE-annotation-in-__skb_wait_for_more_.patch
new file mode 100644
index 0000000000..7338c1c9cc
--- /dev/null
+++ b/patches.suse/net-add-READ_ONCE-annotation-in-__skb_wait_for_more_.patch
@@ -0,0 +1,77 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 23 Oct 2019 22:44:52 -0700
+Subject: net: add READ_ONCE() annotation in __skb_wait_for_more_packets()
+Patch-mainline: v5.4-rc6
+Git-commit: 7c422d0ce97552dde4a97e6290de70ec6efb0fc6
+References: bsc#1154353
+
+__skb_wait_for_more_packets() can be called while other cpus
+are feeding packets to the socket receive queue.
+
+KCSAN reported :
+
+BUG: KCSAN: data-race in __skb_wait_for_more_packets / __udp_enqueue_schedule_skb
+
+write to 0xffff888102e40b58 of 8 bytes by interrupt on cpu 0:
+ __skb_insert include/linux/skbuff.h:1852 [inline]
+ __skb_queue_before include/linux/skbuff.h:1958 [inline]
+ __skb_queue_tail include/linux/skbuff.h:1991 [inline]
+ __udp_enqueue_schedule_skb+0x2d7/0x410 net/ipv4/udp.c:1470
+ __udp_queue_rcv_skb net/ipv4/udp.c:1940 [inline]
+ udp_queue_rcv_one_skb+0x7bd/0xc70 net/ipv4/udp.c:2057
+ udp_queue_rcv_skb+0xb5/0x400 net/ipv4/udp.c:2074
+ udp_unicast_rcv_skb.isra.0+0x7e/0x1c0 net/ipv4/udp.c:2233
+ __udp4_lib_rcv+0xa44/0x17c0 net/ipv4/udp.c:2300
+ udp_rcv+0x2b/0x40 net/ipv4/udp.c:2470
+ ip_protocol_deliver_rcu+0x4d/0x420 net/ipv4/ip_input.c:204
+ ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252
+ dst_input include/net/dst.h:442 [inline]
+ ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5010
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5124
+ process_backlog+0x1d3/0x420 net/core/dev.c:5955
+
+read to 0xffff888102e40b58 of 8 bytes by task 13035 on cpu 1:
+ __skb_wait_for_more_packets+0xfa/0x320 net/core/datagram.c:100
+ __skb_recv_udp+0x374/0x500 net/ipv4/udp.c:1683
+ udp_recvmsg+0xe1/0xb10 net/ipv4/udp.c:1712
+ inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838
+ sock_recvmsg_nosec+0x5c/0x70 net/socket.c:871
+ ___sys_recvmsg+0x1a0/0x3e0 net/socket.c:2480
+ do_recvmmsg+0x19a/0x5c0 net/socket.c:2601
+ __sys_recvmmsg+0x1ef/0x200 net/socket.c:2680
+ __do_sys_recvmmsg net/socket.c:2703 [inline]
+ __se_sys_recvmmsg net/socket.c:2696 [inline]
+ __x64_sys_recvmmsg+0x89/0xb0 net/socket.c:2696
+ do_syscall_64+0xcc/0x370 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 1 PID: 13035 Comm: syz-executor.3 Not tainted 5.4.0-rc3+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/core/datagram.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -97,7 +97,7 @@ int __skb_wait_for_more_packets(struct s
+ if (error)
+ goto out_err;
+
+- if (sk->sk_receive_queue.prev != skb)
++ if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
+ goto out;
+
+ /* Socket shut down? */
diff --git a/patches.suse/net-add-skb_queue_empty_lockless.patch b/patches.suse/net-add-skb_queue_empty_lockless.patch
new file mode 100644
index 0000000000..68f4139ebb
--- /dev/null
+++ b/patches.suse/net-add-skb_queue_empty_lockless.patch
@@ -0,0 +1,91 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 23 Oct 2019 22:44:48 -0700
+Subject: net: add skb_queue_empty_lockless()
+Patch-mainline: v5.4-rc6
+Git-commit: d7d16a89350ab263484c0aa2b523dd3a234e4a80
+References: bsc#1154353
+
+Some paths call skb_queue_empty() without holding
+the queue lock. We must use a barrier in order
+not to let the compiler do strange things, and to avoid
+KCSAN splats.
+
+Adding a barrier in skb_queue_empty() might be overkill;
+I prefer adding a new helper to clearly identify
+points where the callers might be lockless. This might
+help us find real bugs.
+
+The corresponding WRITE_ONCE() annotations should add zero cost
+with current compilers.
+
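+A typical lockless caller then looks like this (sketch, modeled on the
+poll path):
+
+    /* no queue->lock held here; the READ_ONCE() inside the helper
+     * guarantees a sane one-time read of the head pointer
+     */
+    if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+            mask |= EPOLLIN | EPOLLRDNORM;
+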
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/linux/skbuff.h | 33 ++++++++++++++++++++++++---------
+ 1 file changed, 24 insertions(+), 9 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1490,6 +1490,19 @@ static inline int skb_queue_empty(const
+ }
+
+ /**
++ * skb_queue_empty_lockless - check if a queue is empty
++ * @list: queue head
++ *
++ * Returns true if the queue is empty, false otherwise.
++ * This variant can be used in lockless contexts.
++ */
++static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
++{
++ return READ_ONCE(list->next) == (const struct sk_buff *) list;
++}
++
++
++/**
+ * skb_queue_is_last - check if skb is the last entry in the queue
+ * @list: queue head
+ * @skb: buffer
+@@ -1842,9 +1855,11 @@ static inline void __skb_insert(struct s
+ struct sk_buff *prev, struct sk_buff *next,
+ struct sk_buff_head *list)
+ {
+- newsk->next = next;
+- newsk->prev = prev;
+- next->prev = prev->next = newsk;
++ /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
++ WRITE_ONCE(newsk->next, next);
++ WRITE_ONCE(newsk->prev, prev);
++ WRITE_ONCE(next->prev, newsk);
++ WRITE_ONCE(prev->next, newsk);
+ list->qlen++;
+ }
+
+@@ -1855,11 +1870,11 @@ static inline void __skb_queue_splice(co
+ struct sk_buff *first = list->next;
+ struct sk_buff *last = list->prev;
+
+- first->prev = prev;
+- prev->next = first;
++ WRITE_ONCE(first->prev, prev);
++ WRITE_ONCE(prev->next, first);
+
+- last->next = next;
+- next->prev = last;
++ WRITE_ONCE(last->next, next);
++ WRITE_ONCE(next->prev, last);
+ }
+
+ /**
+@@ -2000,8 +2015,8 @@ static inline void __skb_unlink(struct s
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = skb->prev = NULL;
+- next->prev = prev;
+- prev->next = next;
++ WRITE_ONCE(next->prev, prev);
++ WRITE_ONCE(prev->next, next);
+ }
+
+ /**
diff --git a/patches.suse/net-annotate-accesses-to-sk-sk_incoming_cpu.patch b/patches.suse/net-annotate-accesses-to-sk-sk_incoming_cpu.patch
new file mode 100644
index 0000000000..1f1a3385b5
--- /dev/null
+++ b/patches.suse/net-annotate-accesses-to-sk-sk_incoming_cpu.patch
@@ -0,0 +1,156 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 30 Oct 2019 13:00:04 -0700
+Subject: net: annotate accesses to sk->sk_incoming_cpu
+Patch-mainline: v5.4-rc6
+Git-commit: 7170a977743b72cf3eb46ef6ef89885dc7ad3621
+References: bsc#1154353
+
+This socket field can be read and written by concurrent cpus.
+
+Use READ_ONCE() and WRITE_ONCE() annotations to document this,
+and avoid some compiler 'optimizations'.
+
+KCSAN reported :
+
+BUG: KCSAN: data-race in tcp_v4_rcv / tcp_v4_rcv
+
+write to 0xffff88812220763c of 4 bytes by interrupt on cpu 0:
+ sk_incoming_cpu_update include/net/sock.h:953 [inline]
+ tcp_v4_rcv+0x1b3c/0x1bb0 net/ipv4/tcp_ipv4.c:1934
+ ip_protocol_deliver_rcu+0x4d/0x420 net/ipv4/ip_input.c:204
+ ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252
+ dst_input include/net/dst.h:442 [inline]
+ ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5010
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5124
+ process_backlog+0x1d3/0x420 net/core/dev.c:5955
+ napi_poll net/core/dev.c:6392 [inline]
+ net_rx_action+0x3ae/0xa90 net/core/dev.c:6460
+ __do_softirq+0x115/0x33f kernel/softirq.c:292
+ do_softirq_own_stack+0x2a/0x40 arch/x86/entry/entry_64.S:1082
+ do_softirq.part.0+0x6b/0x80 kernel/softirq.c:337
+ do_softirq kernel/softirq.c:329 [inline]
+ __local_bh_enable_ip+0x76/0x80 kernel/softirq.c:189
+
+read to 0xffff88812220763c of 4 bytes by interrupt on cpu 1:
+ sk_incoming_cpu_update include/net/sock.h:952 [inline]
+ tcp_v4_rcv+0x181a/0x1bb0 net/ipv4/tcp_ipv4.c:1934
+ ip_protocol_deliver_rcu+0x4d/0x420 net/ipv4/ip_input.c:204
+ ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252
+ dst_input include/net/dst.h:442 [inline]
+ ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5010
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5124
+ process_backlog+0x1d3/0x420 net/core/dev.c:5955
+ napi_poll net/core/dev.c:6392 [inline]
+ net_rx_action+0x3ae/0xa90 net/core/dev.c:6460
+ __do_softirq+0x115/0x33f kernel/softirq.c:292
+ run_ksoftirqd+0x46/0x60 kernel/softirq.c:603
+ smpboot_thread_fn+0x37d/0x4a0 kernel/smpboot.c:165
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 1 PID: 16 Comm: ksoftirqd/1 Not tainted 5.4.0-rc3+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/sock.h | 4 ++--
+ net/core/sock.c | 4 ++--
+ net/ipv4/inet_hashtables.c | 2 +-
+ net/ipv4/udp.c | 2 +-
+ net/ipv6/inet6_hashtables.c | 2 +-
+ net/ipv6/udp.c | 2 +-
+ 6 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -954,8 +954,8 @@ static inline void sk_incoming_cpu_updat
+ {
+ int cpu = raw_smp_processor_id();
+
+- if (unlikely(sk->sk_incoming_cpu != cpu))
+- sk->sk_incoming_cpu = cpu;
++ if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
++ WRITE_ONCE(sk->sk_incoming_cpu, cpu);
+ }
+
+ static inline void sock_rps_record_flow_hash(__u32 hash)
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1127,7 +1127,7 @@ set_rcvbuf:
+ break;
+ }
+ case SO_INCOMING_CPU:
+- sk->sk_incoming_cpu = val;
++ WRITE_ONCE(sk->sk_incoming_cpu, val);
+ break;
+
+ case SO_CNX_ADVICE:
+@@ -1476,7 +1476,7 @@ int sock_getsockopt(struct socket *sock,
+ break;
+
+ case SO_INCOMING_CPU:
+- v.val = sk->sk_incoming_cpu;
++ v.val = READ_ONCE(sk->sk_incoming_cpu);
+ break;
+
+ case SO_MEMINFO:
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -240,7 +240,7 @@ static inline int compute_score(struct s
+ return -1;
+
+ score = sk->sk_family == PF_INET ? 2 : 1;
+- if (sk->sk_incoming_cpu == raw_smp_processor_id())
++ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
+ score++;
+ }
+ return score;
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -388,7 +388,7 @@ static int compute_score(struct sock *sk
+ return -1;
+ score += 4;
+
+- if (sk->sk_incoming_cpu == raw_smp_processor_id())
++ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
+ score++;
+ return score;
+ }
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -105,7 +105,7 @@ static inline int compute_score(struct s
+ return -1;
+
+ score = 1;
+- if (sk->sk_incoming_cpu == raw_smp_processor_id())
++ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
+ score++;
+ }
+ return score;
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -135,7 +135,7 @@ static int compute_score(struct sock *sk
+ return -1;
+ score++;
+
+- if (sk->sk_incoming_cpu == raw_smp_processor_id())
++ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
+ score++;
+
+ return score;
diff --git a/patches.suse/net-annotate-lockless-accesses-to-sk-sk_napi_id.patch b/patches.suse/net-annotate-lockless-accesses-to-sk-sk_napi_id.patch
new file mode 100644
index 0000000000..4bfd948c04
--- /dev/null
+++ b/patches.suse/net-annotate-lockless-accesses-to-sk-sk_napi_id.patch
@@ -0,0 +1,96 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 29 Oct 2019 10:54:44 -0700
+Subject: net: annotate lockless accesses to sk->sk_napi_id
+Patch-mainline: v5.4-rc6
+Git-commit: ee8d153d46a3b98c064ee15c0c0a3bbf1450e5a1
+References: bsc#1154353
+
+We already annotated most accesses to sk->sk_napi_id.
+
+We missed sk_mark_napi_id() and sk_mark_napi_id_once(),
+which might be called without the socket lock held in the UDP stack.
+
+KCSAN reported :
+BUG: KCSAN: data-race in udpv6_queue_rcv_one_skb / udpv6_queue_rcv_one_skb
+
+write to 0xffff888121c6d108 of 4 bytes by interrupt on cpu 0:
+ sk_mark_napi_id include/net/busy_poll.h:125 [inline]
+ __udpv6_queue_rcv_skb net/ipv6/udp.c:571 [inline]
+ udpv6_queue_rcv_one_skb+0x70c/0xb40 net/ipv6/udp.c:672
+ udpv6_queue_rcv_skb+0xb5/0x400 net/ipv6/udp.c:689
+ udp6_unicast_rcv_skb.isra.0+0xd7/0x180 net/ipv6/udp.c:832
+ __udp6_lib_rcv+0x69c/0x1770 net/ipv6/udp.c:913
+ udpv6_rcv+0x2b/0x40 net/ipv6/udp.c:1015
+ ip6_protocol_deliver_rcu+0x22a/0xbe0 net/ipv6/ip6_input.c:409
+ ip6_input_finish+0x30/0x50 net/ipv6/ip6_input.c:450
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip6_input+0x177/0x190 net/ipv6/ip6_input.c:459
+ dst_input include/net/dst.h:442 [inline]
+ ip6_rcv_finish+0x110/0x140 net/ipv6/ip6_input.c:76
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ipv6_rcv+0x1a1/0x1b0 net/ipv6/ip6_input.c:284
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5010
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5124
+ process_backlog+0x1d3/0x420 net/core/dev.c:5955
+ napi_poll net/core/dev.c:6392 [inline]
+ net_rx_action+0x3ae/0xa90 net/core/dev.c:6460
+
+write to 0xffff888121c6d108 of 4 bytes by interrupt on cpu 1:
+ sk_mark_napi_id include/net/busy_poll.h:125 [inline]
+ __udpv6_queue_rcv_skb net/ipv6/udp.c:571 [inline]
+ udpv6_queue_rcv_one_skb+0x70c/0xb40 net/ipv6/udp.c:672
+ udpv6_queue_rcv_skb+0xb5/0x400 net/ipv6/udp.c:689
+ udp6_unicast_rcv_skb.isra.0+0xd7/0x180 net/ipv6/udp.c:832
+ __udp6_lib_rcv+0x69c/0x1770 net/ipv6/udp.c:913
+ udpv6_rcv+0x2b/0x40 net/ipv6/udp.c:1015
+ ip6_protocol_deliver_rcu+0x22a/0xbe0 net/ipv6/ip6_input.c:409
+ ip6_input_finish+0x30/0x50 net/ipv6/ip6_input.c:450
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip6_input+0x177/0x190 net/ipv6/ip6_input.c:459
+ dst_input include/net/dst.h:442 [inline]
+ ip6_rcv_finish+0x110/0x140 net/ipv6/ip6_input.c:76
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ipv6_rcv+0x1a1/0x1b0 net/ipv6/ip6_input.c:284
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5010
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5124
+ process_backlog+0x1d3/0x420 net/core/dev.c:5955
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 1 PID: 10890 Comm: syz-executor.0 Not tainted 5.4.0-rc3+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Fixes: e68b6e50fa35 ("udp: enable busy polling for all sockets")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/busy_poll.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -122,7 +122,7 @@ static inline void skb_mark_napi_id(stru
+ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
+ {
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+- sk->sk_napi_id = skb->napi_id;
++ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+ #endif
+ sk_rx_queue_set(sk, skb);
+ }
+@@ -132,8 +132,8 @@ static inline void sk_mark_napi_id_once(
+ const struct sk_buff *skb)
+ {
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+- if (!sk->sk_napi_id)
+- sk->sk_napi_id = skb->napi_id;
++ if (!READ_ONCE(sk->sk_napi_id))
++ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+ #endif
+ }
+
diff --git a/patches.suse/net-annotate-sk-sk_rcvlowat-lockless-reads.patch b/patches.suse/net-annotate-sk-sk_rcvlowat-lockless-reads.patch
new file mode 100644
index 0000000000..3dcf3595dc
--- /dev/null
+++ b/patches.suse/net-annotate-sk-sk_rcvlowat-lockless-reads.patch
@@ -0,0 +1,83 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 9 Oct 2019 15:32:35 -0700
+Subject: net: annotate sk->sk_rcvlowat lockless reads
+Patch-mainline: v5.4-rc4
+Git-commit: eac66402d1c342f07ff38f8d631ff95eb7ad3220
+References: bsc#1154353
+
+sock_rcvlowat() or int_sk_rcvlowat() might be called without the socket
+lock, for example from tcp_poll().
+
+Use READ_ONCE() to document the fact that other cpus might change
+sk->sk_rcvlowat under us and avoid KCSAN splats.
+
+Use WRITE_ONCE() on write sides too.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/sock.h | 4 +++-
+ net/core/filter.c | 2 +-
+ net/core/sock.c | 2 +-
+ net/ipv4/tcp.c | 2 +-
+ net/sched/em_meta.c | 2 +-
+ 5 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2271,7 +2271,9 @@ static inline long sock_sndtimeo(const s
+
+ static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
+ {
+- return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
++ int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
++
++ return v ?: 1;
+ }
+
+ /* Alas, with timeout socket operations are not restartable.
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4274,7 +4274,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_so
+ case SO_RCVLOWAT:
+ if (val < 0)
+ val = INT_MAX;
+- sk->sk_rcvlowat = val ? : 1;
++ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
+ break;
+ case SO_MARK:
+ if (sk->sk_mark != val) {
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -974,7 +974,7 @@ set_rcvbuf:
+ if (sock->ops->set_rcvlowat)
+ ret = sock->ops->set_rcvlowat(sk, val);
+ else
+- sk->sk_rcvlowat = val ? : 1;
++ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
+ break;
+
+ case SO_RCVTIMEO_OLD:
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1698,7 +1698,7 @@ int tcp_set_rcvlowat(struct sock *sk, in
+ else
+ cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
+ val = min(val, cap);
+- sk->sk_rcvlowat = val ? : 1;
++ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
+
+ /* Check if we need to signal EPOLLIN right now */
+ tcp_data_ready(sk);
+--- a/net/sched/em_meta.c
++++ b/net/sched/em_meta.c
+@@ -554,7 +554,7 @@ META_COLLECTOR(int_sk_rcvlowat)
+ *err = -1;
+ return;
+ }
+- dst->value = sk->sk_rcvlowat;
++ dst->value = READ_ONCE(sk->sk_rcvlowat);
+ }
+
+ META_COLLECTOR(int_sk_rcvtimeo)
diff --git a/patches.suse/net-avoid-possible-false-sharing-in-sk_leave_memory_.patch b/patches.suse/net-avoid-possible-false-sharing-in-sk_leave_memory_.patch
new file mode 100644
index 0000000000..5387040c18
--- /dev/null
+++ b/patches.suse/net-avoid-possible-false-sharing-in-sk_leave_memory_.patch
@@ -0,0 +1,41 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 9 Oct 2019 12:55:53 -0700
+Subject: net: avoid possible false sharing in sk_leave_memory_pressure()
+Patch-mainline: v5.4-rc4
+Git-commit: 503978aca46124cd714703e180b9c8292ba50ba7
+References: bsc#1154353
+
+As mentioned in https://github.com/google/ktsan/wiki/READ_ONCE-and-WRITE_ONCE#it-may-improve-performance,
+a C compiler can legally transform:
+
+if (memory_pressure && *memory_pressure)
+ *memory_pressure = 0;
+
+to:
+
+if (memory_pressure)
+ *memory_pressure = 0;
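+
+A sketch of the annotated form (the actual fix is the hunk below):
+READ_ONCE() forces the flag to really be tested and the store to stay
+conditional, so the cache line is only dirtied when the flag was
+actually set:
+
+	if (memory_pressure && READ_ONCE(*memory_pressure))
+		WRITE_ONCE(*memory_pressure, 0);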
+
+Fixes: 0604475119de ("tcp: add TCPMemoryPressuresChrono counter")
+Fixes: 180d8cd942ce ("foundations of per-cgroup memory pressure controlling.")
+Fixes: 3ab224be6d69 ("[NET] CORE: Introducing new memory accounting interface.")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/core/sock.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2334,8 +2334,8 @@ static void sk_leave_memory_pressure(str
+ } else {
+ unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
+
+- if (memory_pressure && *memory_pressure)
+- *memory_pressure = 0;
++ if (memory_pressure && READ_ONCE(*memory_pressure))
++ WRITE_ONCE(*memory_pressure, 0);
+ }
+ }
+
diff --git a/patches.suse/net-cavium-Use-the-correct-style-for-SPDX-License-Id.patch b/patches.suse/net-cavium-Use-the-correct-style-for-SPDX-License-Id.patch
new file mode 100644
index 0000000000..acdc4f6974
--- /dev/null
+++ b/patches.suse/net-cavium-Use-the-correct-style-for-SPDX-License-Id.patch
@@ -0,0 +1,32 @@
+From: Nishad Kamdar <nishadkamdar@gmail.com>
+Date: Sat, 12 Oct 2019 18:42:28 +0530
+Subject: net: cavium: Use the correct style for SPDX License Identifier
+Patch-mainline: v5.4-rc4
+Git-commit: a03681dd5d1bf7b0ed3aad3162f7810af4c4e05b
+References: bsc#1154353
+
+This patch corrects the SPDX License Identifier style
+in header files related to Cavium Ethernet drivers.
+For C header files Documentation/process/license-rules.rst
+mandates C-like comments (as opposed to C source files, where
+C++ style should be used).
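+
+Concretely, for a header like cavium_ptp.h the identifier has to be a
+C comment:
+
+/* SPDX-License-Identifier: GPL-2.0 */
+
+while the C++ style // SPDX-License-Identifier: GPL-2.0 is only
+correct in .c files.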
+
+Changes made by using a script provided by Joe Perches here:
+https://lkml.org/lkml/2019/2/7/46.
+
+Suggested-by: Joe Perches <joe@perches.com>
+Signed-off-by: Nishad Kamdar <nishadkamdar@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/cavium/common/cavium_ptp.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/cavium/common/cavium_ptp.h
++++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
+@@ -1,4 +1,4 @@
+-// SPDX-License-Identifier: GPL-2.0
++/* SPDX-License-Identifier: GPL-2.0 */
+ /* cavium_ptp.h - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
diff --git a/patches.suse/net-cls_bpf-fix-NULL-deref-on-offload-filter-removal.patch b/patches.suse/net-cls_bpf-fix-NULL-deref-on-offload-filter-removal.patch
new file mode 100644
index 0000000000..837b3e4fc5
--- /dev/null
+++ b/patches.suse/net-cls_bpf-fix-NULL-deref-on-offload-filter-removal.patch
@@ -0,0 +1,44 @@
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Thu, 31 Oct 2019 20:06:59 -0700
+Subject: net: cls_bpf: fix NULL deref on offload filter removal
+Patch-mainline: v5.4-rc6
+Git-commit: 41aa29a58b5f7f7be43f35372ef411f304a87a0d
+References: bsc#1154353
+
+Commit 401192113730 ("net: sched: refactor block offloads counter
+usage") missed the fact that either the new prog or the old prog may
+be NULL.
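+
+With both possibilities handled, the offload command decomposes into
+three cases (condensed from the hunk below; arguments elided):
+
+	if (oldprog && prog)	/* both present: replace old with new */
+		err = tc_setup_cb_replace(...);
+	else if (prog)		/* only a new prog: add */
+		err = tc_setup_cb_add(...);
+	else			/* only an old prog: destroy */
+		err = tc_setup_cb_destroy(...);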
+
+Fixes: 401192113730 ("net: sched: refactor block offloads counter usage")
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/sched/cls_bpf.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/sched/cls_bpf.c
++++ b/net/sched/cls_bpf.c
+@@ -162,16 +162,20 @@ static int cls_bpf_offload_cmd(struct tc
+ cls_bpf.name = obj->bpf_name;
+ cls_bpf.exts_integrated = obj->exts_integrated;
+
+- if (oldprog)
++ if (oldprog && prog)
+ err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
+ skip_sw, &oldprog->gen_flags,
+ &oldprog->in_hw_count,
+ &prog->gen_flags, &prog->in_hw_count,
+ true);
+- else
++ else if (prog)
+ err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
+ skip_sw, &prog->gen_flags,
+ &prog->in_hw_count, true);
++ else
++ err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
++ skip_sw, &oldprog->gen_flags,
++ &oldprog->in_hw_count, true);
+
+ if (prog && err) {
+ cls_bpf_offload_cmd(tp, oldprog, prog, extack);
diff --git a/patches.suse/net-core-add-generic-lockdep-keys.patch b/patches.suse/net-core-add-generic-lockdep-keys.patch
new file mode 100644
index 0000000000..c48bef055c
--- /dev/null
+++ b/patches.suse/net-core-add-generic-lockdep-keys.patch
@@ -0,0 +1,896 @@
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Mon, 21 Oct 2019 18:47:51 +0000
+Subject: net: core: add generic lockdep keys
+Patch-mainline: v5.4-rc6
+Git-commit: ab92d68fc22f9afab480153bd82a20f6e2533769
+References: bsc#1154353
+
+Some interface types can be nested
+(VLAN, BONDING, TEAM, MACSEC, MACVLAN, IPVLAN, VIRT_WIFI, VXLAN, etc.).
+These interface types have to set a lockdep class because, without a
+lockdep class key, lockdep always warns about circular locking that
+does not actually exist.
+
+In the current code, these interfaces each have their own lockdep class
+keys and manage them themselves, so there is a lot of duplicated code
+under /drivers/net and /net/.
+This patch adds new generic lockdep keys and some helper functions for
+them.
+
+This patch makes the following changes:
+a) Add lockdep class keys to struct net_device
+ - qdisc_running, xmit, addr_list, qdisc_busylock
+ - these keys are used as dynamic lockdep keys.
+b) When a net_device is being allocated, lockdep keys are registered.
+ - alloc_netdev_mqs()
+c) When a net_device is being freed, lockdep keys are unregistered.
+ - free_netdev()
+d) Add generic lockdep key helper functions
+ - netdev_register_lockdep_key()
+ - netdev_unregister_lockdep_key()
+ - netdev_update_lockdep_key()
+e) Remove unnecessary generic lockdep macro and functions
+f) Remove unnecessary lockdep code of each interfaces.
+
+After this patch, individual interface modules no longer need to
+maintain their own lockdep keys.
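+
+A minimal sketch of the resulting per-device pattern (simplified from
+the hunks below; the per-tx-queue loop and qdisc keys are omitted):
+
+	/* alloc_netdev_mqs(): register a dynamic key for this device */
+	lockdep_register_key(&dev->addr_list_lock_key);
+
+	/* register_netdevice(): attach the key to the lock instance */
+	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+
+	/* free_netdev(): a dynamic key must be unregistered again */
+	lockdep_unregister_key(&dev->addr_list_lock_key);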
+
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/bonding/bond_main.c | 1
+ drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 18 ---
+ drivers/net/hamradio/bpqether.c | 22 ---
+ drivers/net/hyperv/netvsc_drv.c | 2
+ drivers/net/ipvlan/ipvlan_main.c | 2
+ drivers/net/macsec.c | 5
+ drivers/net/macvlan.c | 12 --
+ drivers/net/ppp/ppp_generic.c | 2
+ drivers/net/team/team.c | 2
+ drivers/net/vrf.c | 1
+ drivers/net/wireless/intersil/hostap/hostap_hw.c | 25 ----
+ include/linux/netdevice.h | 35 ++----
+ net/8021q/vlan_dev.c | 27 ----
+ net/batman-adv/soft-interface.c | 32 -----
+ net/bluetooth/6lowpan.c | 8 -
+ net/bridge/br_device.c | 8 -
+ net/core/dev.c | 127 +++++++---------------
+ net/core/rtnetlink.c | 1
+ net/dsa/master.c | 5
+ net/dsa/slave.c | 12 --
+ net/ieee802154/6lowpan/core.c | 8 -
+ net/l2tp/l2tp_eth.c | 1
+ net/netrom/af_netrom.c | 23 ---
+ net/rose/af_rose.c | 23 ---
+ net/sched/sch_generic.c | 17 +-
+ 25 files changed, 63 insertions(+), 356 deletions(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4769,7 +4769,6 @@ static int bond_init(struct net_device *
+ return -ENOMEM;
+
+ bond->nest_level = SINGLE_DEPTH_NESTING;
+- netdev_lockdep_set_classes(bond_dev);
+
+ list_add_tail(&bond->bond_list, &bn->dev_list);
+
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+@@ -299,22 +299,6 @@ static void nfp_repr_clean(struct nfp_re
+ nfp_port_free(repr->port);
+ }
+
+-static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
+-static struct lock_class_key nfp_repr_netdev_addr_lock_key;
+-
+-static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
+- struct netdev_queue *txq,
+- void *_unused)
+-{
+- lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
+-}
+-
+-static void nfp_repr_set_lockdep_class(struct net_device *dev)
+-{
+- lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
+- netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
+-}
+-
+ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
+ u32 cmsg_port_id, struct nfp_port *port,
+ struct net_device *pf_netdev)
+@@ -324,8 +308,6 @@ int nfp_repr_init(struct nfp_app *app, s
+ u32 repr_cap = nn->tlv_caps.repr_cap;
+ int err;
+
+- nfp_repr_set_lockdep_class(netdev);
+-
+ repr->port = port;
+ repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
+ if (!repr->dst)
+--- a/drivers/net/hamradio/bpqether.c
++++ b/drivers/net/hamradio/bpqether.c
+@@ -107,27 +107,6 @@ struct bpqdev {
+
+ static LIST_HEAD(bpq_devices);
+
+-/*
+- * bpqether network devices are paired with ethernet devices below them, so
+- * form a special "super class" of normal ethernet devices; split their locks
+- * off into a separate class since they always nest.
+- */
+-static struct lock_class_key bpq_netdev_xmit_lock_key;
+-static struct lock_class_key bpq_netdev_addr_lock_key;
+-
+-static void bpq_set_lockdep_class_one(struct net_device *dev,
+- struct netdev_queue *txq,
+- void *_unused)
+-{
+- lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
+-}
+-
+-static void bpq_set_lockdep_class(struct net_device *dev)
+-{
+- lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
+- netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
+-}
+-
+ /* ------------------------------------------------------------------------ */
+
+
+@@ -498,7 +477,6 @@ static int bpq_new_device(struct net_dev
+ err = register_netdevice(ndev);
+ if (err)
+ goto error;
+- bpq_set_lockdep_class(ndev);
+
+ /* List protected by RTNL */
+ list_add_rcu(&bpq->bpq_list, &bpq_devices);
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2335,8 +2335,6 @@ static int netvsc_probe(struct hv_device
+ NETIF_F_HW_VLAN_CTAG_RX;
+ net->vlan_features = net->features;
+
+- netdev_lockdep_set_classes(net);
+-
+ /* MTU range: 68 - 1500 or 65521 */
+ net->min_mtu = NETVSC_MTU_MIN;
+ if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -130,8 +130,6 @@ static int ipvlan_init(struct net_device
+ dev->gso_max_segs = phy_dev->gso_max_segs;
+ dev->hard_header_len = phy_dev->hard_header_len;
+
+- netdev_lockdep_set_classes(dev);
+-
+ ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
+ if (!ipvlan->pcpu_stats)
+ return -ENOMEM;
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2750,7 +2750,6 @@ static netdev_tx_t macsec_start_xmit(str
+
+ #define MACSEC_FEATURES \
+ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+-static struct lock_class_key macsec_netdev_addr_lock_key;
+
+ static int macsec_dev_init(struct net_device *dev)
+ {
+@@ -3264,10 +3263,6 @@ static int macsec_newlink(struct net *ne
+ dev_hold(real_dev);
+
+ macsec->nest_level = dev_get_nest_level(real_dev) + 1;
+- netdev_lockdep_set_classes(dev);
+- lockdep_set_class_and_subclass(&dev->addr_list_lock,
+- &macsec_netdev_addr_lock_key,
+- macsec_get_nest_level(dev));
+
+ err = netdev_upper_dev_link(real_dev, dev, extack);
+ if (err < 0)
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -852,8 +852,6 @@ static int macvlan_do_ioctl(struct net_d
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+-static struct lock_class_key macvlan_netdev_addr_lock_key;
+-
+ #define ALWAYS_ON_OFFLOADS \
+ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
+ NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
+@@ -874,14 +872,6 @@ static int macvlan_get_nest_level(struct
+ return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+ }
+
+-static void macvlan_set_lockdep_class(struct net_device *dev)
+-{
+- netdev_lockdep_set_classes(dev);
+- lockdep_set_class_and_subclass(&dev->addr_list_lock,
+- &macvlan_netdev_addr_lock_key,
+- macvlan_get_nest_level(dev));
+-}
+-
+ static int macvlan_init(struct net_device *dev)
+ {
+ struct macvlan_dev *vlan = netdev_priv(dev);
+@@ -900,8 +890,6 @@ static int macvlan_init(struct net_devic
+ dev->gso_max_segs = lowerdev->gso_max_segs;
+ dev->hard_header_len = lowerdev->hard_header_len;
+
+- macvlan_set_lockdep_class(dev);
+-
+ vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+ if (!vlan->pcpu_stats)
+ return -ENOMEM;
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1324,8 +1324,6 @@ static int ppp_dev_init(struct net_devic
+ {
+ struct ppp *ppp;
+
+- netdev_lockdep_set_classes(dev);
+-
+ ppp = netdev_priv(dev);
+ /* Let the netdevice take a reference on the ppp file. This ensures
+ * that ppp_destroy_interface() won't run before the device gets
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1642,8 +1642,6 @@ static int team_init(struct net_device *
+ goto err_options_register;
+ netif_carrier_off(dev);
+
+- netdev_lockdep_set_classes(dev);
+-
+ return 0;
+
+ err_options_register:
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -865,7 +865,6 @@ static int vrf_dev_init(struct net_devic
+
+ /* similarly, oper state is irrelevant; set to up to avoid confusion */
+ dev->operstate = IF_OPER_UP;
+- netdev_lockdep_set_classes(dev);
+ return 0;
+
+ out_rth:
+--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
++++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
+@@ -3041,30 +3041,6 @@ static void prism2_clear_set_tim_queue(l
+ }
+ }
+
+-
+-/*
+- * HostAP uses two layers of net devices, where the inner
+- * layer gets called all the time from the outer layer.
+- * This is a natural nesting, which needs a split lock type.
+- */
+-static struct lock_class_key hostap_netdev_xmit_lock_key;
+-static struct lock_class_key hostap_netdev_addr_lock_key;
+-
+-static void prism2_set_lockdep_class_one(struct net_device *dev,
+- struct netdev_queue *txq,
+- void *_unused)
+-{
+- lockdep_set_class(&txq->_xmit_lock,
+- &hostap_netdev_xmit_lock_key);
+-}
+-
+-static void prism2_set_lockdep_class(struct net_device *dev)
+-{
+- lockdep_set_class(&dev->addr_list_lock,
+- &hostap_netdev_addr_lock_key);
+- netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
+-}
+-
+ static struct net_device *
+ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
+ struct device *sdev)
+@@ -3223,7 +3199,6 @@ while (0)
+ if (ret >= 0)
+ ret = register_netdevice(dev);
+
+- prism2_set_lockdep_class(dev);
+ rtnl_unlock();
+ if (ret < 0) {
+ printk(KERN_WARNING "%s: register netdevice failed!\n",
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -924,6 +924,7 @@ struct dev_ifalias {
+ struct devlink;
+ struct tlsdev_ops;
+
++
+ /*
+ * This structure defines the management hooks for network devices.
+ * The following hooks can be defined; unless noted otherwise, they are
+@@ -1759,9 +1760,13 @@ enum netdev_priv_flags {
+ * @phydev: Physical device may attach itself
+ * for hardware timestamping
+ * @sfp_bus: attached &struct sfp_bus structure.
+- *
+- * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
+- * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
++ * @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
++ spinlock
++ * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
++ * @qdisc_xmit_lock_key: lockdep class annotating
++ * netdev_queue->_xmit_lock spinlock
++ * @addr_list_lock_key: lockdep class annotating
++ * net_device->addr_list_lock spinlock
+ *
+ * @proto_down: protocol port state information can be sent to the
+ * switch driver and used to set the phys state of the
+@@ -2048,8 +2053,10 @@ struct net_device {
+ #endif
+ struct phy_device *phydev;
+ struct sfp_bus *sfp_bus;
+- struct lock_class_key *qdisc_tx_busylock;
+- struct lock_class_key *qdisc_running_key;
++ struct lock_class_key qdisc_tx_busylock_key;
++ struct lock_class_key qdisc_running_key;
++ struct lock_class_key qdisc_xmit_lock_key;
++ struct lock_class_key addr_list_lock_key;
+ bool proto_down;
+ unsigned wol_enabled:1;
+ };
+@@ -2127,23 +2134,6 @@ static inline void netdev_for_each_tx_qu
+ f(dev, &dev->_tx[i], arg);
+ }
+
+-#define netdev_lockdep_set_classes(dev) \
+-{ \
+- static struct lock_class_key qdisc_tx_busylock_key; \
+- static struct lock_class_key qdisc_running_key; \
+- static struct lock_class_key qdisc_xmit_lock_key; \
+- static struct lock_class_key dev_addr_list_lock_key; \
+- unsigned int i; \
+- \
+- (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
+- (dev)->qdisc_running_key = &qdisc_running_key; \
+- lockdep_set_class(&(dev)->addr_list_lock, \
+- &dev_addr_list_lock_key); \
+- for (i = 0; i < (dev)->num_tx_queues; i++) \
+- lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
+- &qdisc_xmit_lock_key); \
+-}
+-
+ u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+ struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
+@@ -3142,6 +3132,7 @@ static inline void netif_stop_queue(stru
+ }
+
+ void netif_tx_stop_all_queues(struct net_device *dev);
++void netdev_update_lockdep_key(struct net_device *dev);
+
+ static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+ {
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -489,31 +489,6 @@ static void vlan_dev_set_rx_mode(struct
+ dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+ }
+
+-/*
+- * vlan network devices have devices nesting below it, and are a special
+- * "super class" of normal network devices; split their locks off into a
+- * separate class since they always nest.
+- */
+-static struct lock_class_key vlan_netdev_xmit_lock_key;
+-static struct lock_class_key vlan_netdev_addr_lock_key;
+-
+-static void vlan_dev_set_lockdep_one(struct net_device *dev,
+- struct netdev_queue *txq,
+- void *_subclass)
+-{
+- lockdep_set_class_and_subclass(&txq->_xmit_lock,
+- &vlan_netdev_xmit_lock_key,
+- *(int *)_subclass);
+-}
+-
+-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+-{
+- lockdep_set_class_and_subclass(&dev->addr_list_lock,
+- &vlan_netdev_addr_lock_key,
+- subclass);
+- netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
+-}
+-
+ static int vlan_dev_get_lock_subclass(struct net_device *dev)
+ {
+ return vlan_dev_priv(dev)->nest_level;
+@@ -609,8 +584,6 @@ static int vlan_dev_init(struct net_devi
+
+ SET_NETDEV_DEVTYPE(dev, &vlan_type);
+
+- vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
+-
+ vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+ if (!vlan->vlan_pcpu_stats)
+ return -ENOMEM;
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -740,36 +740,6 @@ static int batadv_interface_kill_vid(str
+ return 0;
+ }
+
+-/* batman-adv network devices have devices nesting below it and are a special
+- * "super class" of normal network devices; split their locks off into a
+- * separate class since they always nest.
+- */
+-static struct lock_class_key batadv_netdev_xmit_lock_key;
+-static struct lock_class_key batadv_netdev_addr_lock_key;
+-
+-/**
+- * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
+- * @dev: device which owns the tx queue
+- * @txq: tx queue to modify
+- * @_unused: always NULL
+- */
+-static void batadv_set_lockdep_class_one(struct net_device *dev,
+- struct netdev_queue *txq,
+- void *_unused)
+-{
+- lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
+-}
+-
+-/**
+- * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
+- * @dev: network device to modify
+- */
+-static void batadv_set_lockdep_class(struct net_device *dev)
+-{
+- lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
+- netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
+-}
+-
+ /**
+ * batadv_softif_init_late() - late stage initialization of soft interface
+ * @dev: registered network device to modify
+@@ -783,8 +753,6 @@ static int batadv_softif_init_late(struc
+ int ret;
+ size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
+
+- batadv_set_lockdep_class(dev);
+-
+ bat_priv = netdev_priv(dev);
+ bat_priv->soft_iface = dev;
+
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -571,15 +571,7 @@ static netdev_tx_t bt_xmit(struct sk_buf
+ return err < 0 ? NET_XMIT_DROP : err;
+ }
+
+-static int bt_dev_init(struct net_device *dev)
+-{
+- netdev_lockdep_set_classes(dev);
+-
+- return 0;
+-}
+-
+ static const struct net_device_ops netdev_ops = {
+- .ndo_init = bt_dev_init,
+ .ndo_start_xmit = bt_xmit,
+ };
+
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -24,8 +24,6 @@
+ const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
+ EXPORT_SYMBOL_GPL(nf_br_ops);
+
+-static struct lock_class_key bridge_netdev_addr_lock_key;
+-
+ /* net device transmit always called with BH disabled */
+ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+@@ -108,11 +106,6 @@ out:
+ return NETDEV_TX_OK;
+ }
+
+-static void br_set_lockdep_class(struct net_device *dev)
+-{
+- lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
+-}
+-
+ static int br_dev_init(struct net_device *dev)
+ {
+ struct net_bridge *br = netdev_priv(dev);
+@@ -150,7 +143,6 @@ static int br_dev_init(struct net_device
+ br_mdb_hash_fini(br);
+ br_fdb_hash_fini(br);
+ }
+- br_set_lockdep_class(dev);
+
+ return err;
+ }
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -277,88 +277,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
+ DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+ EXPORT_PER_CPU_SYMBOL(softnet_data);
+
+-#ifdef CONFIG_LOCKDEP
+-/*
+- * register_netdevice() inits txq->_xmit_lock and sets lockdep class
+- * according to dev->type
+- */
+-static const unsigned short netdev_lock_type[] = {
+- ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
+- ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
+- ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
+- ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
+- ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
+- ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
+- ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
+- ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
+- ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
+- ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
+- ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
+- ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
+- ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
+- ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
+- ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
+-
+-static const char *const netdev_lock_name[] = {
+- "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
+- "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
+- "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
+- "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
+- "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
+- "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
+- "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
+- "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
+- "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
+- "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
+- "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
+- "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
+- "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
+- "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
+- "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
+-
+-static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
+-static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
+-
+-static inline unsigned short netdev_lock_pos(unsigned short dev_type)
+-{
+- int i;
+-
+- for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
+- if (netdev_lock_type[i] == dev_type)
+- return i;
+- /* the last key is used by default */
+- return ARRAY_SIZE(netdev_lock_type) - 1;
+-}
+-
+-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+- unsigned short dev_type)
+-{
+- int i;
+-
+- i = netdev_lock_pos(dev_type);
+- lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
+- netdev_lock_name[i]);
+-}
+-
+-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+-{
+- int i;
+-
+- i = netdev_lock_pos(dev->type);
+- lockdep_set_class_and_name(&dev->addr_list_lock,
+- &netdev_addr_lock_key[i],
+- netdev_lock_name[i]);
+-}
+-#else
+-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+- unsigned short dev_type)
+-{
+-}
+-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+-{
+-}
+-#endif
+-
+ /*******************************************************************************
+ *
+ * Protocol management and registration routines
+@@ -8799,7 +8717,7 @@ static void netdev_init_one_queue(struct
+ {
+ /* Initialize queue lock */
+ spin_lock_init(&queue->_xmit_lock);
+- netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
++ lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
+ queue->xmit_lock_owner = -1;
+ netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
+ queue->dev = dev;
+@@ -8846,6 +8764,43 @@ void netif_tx_stop_all_queues(struct net
+ }
+ EXPORT_SYMBOL(netif_tx_stop_all_queues);
+
++static void netdev_register_lockdep_key(struct net_device *dev)
++{
++ lockdep_register_key(&dev->qdisc_tx_busylock_key);
++ lockdep_register_key(&dev->qdisc_running_key);
++ lockdep_register_key(&dev->qdisc_xmit_lock_key);
++ lockdep_register_key(&dev->addr_list_lock_key);
++}
++
++static void netdev_unregister_lockdep_key(struct net_device *dev)
++{
++ lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
++ lockdep_unregister_key(&dev->qdisc_running_key);
++ lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
++ lockdep_unregister_key(&dev->addr_list_lock_key);
++}
++
++void netdev_update_lockdep_key(struct net_device *dev)
++{
++ struct netdev_queue *queue;
++ int i;
++
++ lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
++ lockdep_unregister_key(&dev->addr_list_lock_key);
++
++ lockdep_register_key(&dev->qdisc_xmit_lock_key);
++ lockdep_register_key(&dev->addr_list_lock_key);
++
++ lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
++ for (i = 0; i < dev->num_tx_queues; i++) {
++ queue = netdev_get_tx_queue(dev, i);
++
++ lockdep_set_class(&queue->_xmit_lock,
++ &dev->qdisc_xmit_lock_key);
++ }
++}
++EXPORT_SYMBOL(netdev_update_lockdep_key);
++
+ /**
+ * register_netdevice - register a network device
+ * @dev: device to register
+@@ -8880,7 +8835,7 @@ int register_netdevice(struct net_device
+ BUG_ON(!net);
+
+ spin_lock_init(&dev->addr_list_lock);
+- netdev_set_addr_lockdep_class(dev);
++ lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+
+ ret = dev_get_valid_name(net, dev, dev->name);
+ if (ret < 0)
+@@ -9390,6 +9345,8 @@ struct net_device *alloc_netdev_mqs(int
+
+ dev_net_set(dev, &init_net);
+
++ netdev_register_lockdep_key(dev);
++
+ dev->gso_max_size = GSO_MAX_SIZE;
+ dev->gso_max_segs = GSO_MAX_SEGS;
+ dev->upper_level = 1;
+@@ -9474,6 +9431,8 @@ void free_netdev(struct net_device *dev)
+ free_percpu(dev->pcpu_refcnt);
+ dev->pcpu_refcnt = NULL;
+
++ netdev_unregister_lockdep_key(dev);
++
+ /* Compatibility with error handling in drivers */
+ if (dev->reg_state == NETREG_UNINITIALIZED) {
+ netdev_freemem(dev);
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2355,6 +2355,7 @@ static int do_set_master(struct net_devi
+ err = ops->ndo_del_slave(upper_dev, dev);
+ if (err)
+ return err;
++ netdev_update_lockdep_key(dev);
+ } else {
+ return -EOPNOTSUPP;
+ }
+--- a/net/dsa/master.c
++++ b/net/dsa/master.c
+@@ -244,8 +244,6 @@ static void dsa_master_reset_mtu(struct
+ rtnl_unlock();
+ }
+
+-static struct lock_class_key dsa_master_addr_list_lock_key;
+-
+ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+ {
+ int ret;
+@@ -259,9 +257,6 @@ int dsa_master_setup(struct net_device *
+ wmb();
+
+ dev->dsa_ptr = cpu_dp;
+- lockdep_set_class(&dev->addr_list_lock,
+- &dsa_master_addr_list_lock_key);
+-
+ ret = dsa_master_ethtool_setup(dev);
+ if (ret)
+ return ret;
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1280,15 +1280,6 @@ static int dsa_slave_phy_setup(struct ne
+ return ret;
+ }
+
+-static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
+-static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
+- struct netdev_queue *txq,
+- void *_unused)
+-{
+- lockdep_set_class(&txq->_xmit_lock,
+- &dsa_slave_netdev_xmit_lock_key);
+-}
+-
+ int dsa_slave_suspend(struct net_device *slave_dev)
+ {
+ struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+@@ -1371,9 +1362,6 @@ int dsa_slave_create(struct dsa_port *po
+ slave_dev->max_mtu = ETH_MAX_MTU;
+ SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
+
+- netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
+- NULL);
+-
+ SET_NETDEV_DEV(slave_dev, port->ds->dev);
+ slave_dev->dev.of_node = port->dn;
+ slave_dev->vlan_features = master->vlan_features;
+--- a/net/ieee802154/6lowpan/core.c
++++ b/net/ieee802154/6lowpan/core.c
+@@ -58,13 +58,6 @@ static const struct header_ops lowpan_he
+ .create = lowpan_header_create,
+ };
+
+-static int lowpan_dev_init(struct net_device *ldev)
+-{
+- netdev_lockdep_set_classes(ldev);
+-
+- return 0;
+-}
+-
+ static int lowpan_open(struct net_device *dev)
+ {
+ if (!open_count)
+@@ -96,7 +89,6 @@ static int lowpan_get_iflink(const struc
+ }
+
+ static const struct net_device_ops lowpan_netdev_ops = {
+- .ndo_init = lowpan_dev_init,
+ .ndo_start_xmit = lowpan_xmit,
+ .ndo_open = lowpan_open,
+ .ndo_stop = lowpan_stop,
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -56,7 +56,6 @@ static int l2tp_eth_dev_init(struct net_
+ {
+ eth_hw_addr_random(dev);
+ eth_broadcast_addr(dev->broadcast);
+- netdev_lockdep_set_classes(dev);
+
+ return 0;
+ }
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -64,28 +64,6 @@ static DEFINE_SPINLOCK(nr_list_lock);
+ static const struct proto_ops nr_proto_ops;
+
+ /*
+- * NETROM network devices are virtual network devices encapsulating NETROM
+- * frames into AX.25 which will be sent through an AX.25 device, so form a
+- * special "super class" of normal net devices; split their locks off into a
+- * separate class since they always nest.
+- */
+-static struct lock_class_key nr_netdev_xmit_lock_key;
+-static struct lock_class_key nr_netdev_addr_lock_key;
+-
+-static void nr_set_lockdep_one(struct net_device *dev,
+- struct netdev_queue *txq,
+- void *_unused)
+-{
+- lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
+-}
+-
+-static void nr_set_lockdep_key(struct net_device *dev)
+-{
+- lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
+- netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
+-}
+-
+-/*
+ * Socket removal during an interrupt is now safe.
+ */
+ static void nr_remove_socket(struct sock *sk)
+@@ -1414,7 +1392,6 @@ static int __init nr_proto_init(void)
+ free_netdev(dev);
+ goto fail;
+ }
+- nr_set_lockdep_key(dev);
+ dev_nr[i] = dev;
+ }
+
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -65,28 +65,6 @@ static const struct proto_ops rose_proto
+ ax25_address rose_callsign;
+
+ /*
+- * ROSE network devices are virtual network devices encapsulating ROSE
+- * frames into AX.25 which will be sent through an AX.25 device, so form a
+- * special "super class" of normal net devices; split their locks off into a
+- * separate class since they always nest.
+- */
+-static struct lock_class_key rose_netdev_xmit_lock_key;
+-static struct lock_class_key rose_netdev_addr_lock_key;
+-
+-static void rose_set_lockdep_one(struct net_device *dev,
+- struct netdev_queue *txq,
+- void *_unused)
+-{
+- lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
+-}
+-
+-static void rose_set_lockdep_key(struct net_device *dev)
+-{
+- lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
+- netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
+-}
+-
+-/*
+ * Convert a ROSE address into text.
+ */
+ char *rose2asc(char *buf, const rose_address *addr)
+@@ -1533,7 +1511,6 @@ static int __init rose_proto_init(void)
+ free_netdev(dev);
+ goto fail;
+ }
+- rose_set_lockdep_key(dev);
+ dev_rose[i] = dev;
+ }
+
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -799,9 +799,6 @@ struct Qdisc_ops pfifo_fast_ops __read_m
+ };
+ EXPORT_SYMBOL(pfifo_fast_ops);
+
+-static struct lock_class_key qdisc_tx_busylock;
+-static struct lock_class_key qdisc_running_key;
+-
+ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ const struct Qdisc_ops *ops,
+ struct netlink_ext_ack *extack)
+@@ -854,17 +851,9 @@ struct Qdisc *qdisc_alloc(struct netdev_
+ }
+
+ spin_lock_init(&sch->busylock);
+- lockdep_set_class(&sch->busylock,
+- dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+-
+ /* seqlock has the same scope of busylock, for NOLOCK qdisc */
+ spin_lock_init(&sch->seqlock);
+- lockdep_set_class(&sch->busylock,
+- dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+-
+ seqcount_init(&sch->running);
+- lockdep_set_class(&sch->running,
+- dev->qdisc_running_key ?: &qdisc_running_key);
+
+ sch->ops = ops;
+ sch->flags = ops->static_flags;
+@@ -875,6 +864,12 @@ struct Qdisc *qdisc_alloc(struct netdev_
+ dev_hold(dev);
+ refcount_set(&sch->refcnt, 1);
+
++ if (sch != &noop_qdisc) {
++ lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key);
++ lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key);
++ lockdep_set_class(&sch->running, &dev->qdisc_running_key);
++ }
++
+ return sch;
+ errout1:
+ kfree(p);
diff --git a/patches.suse/net-core-add-ignore-flag-to-netdev_adjacent-structur.patch b/patches.suse/net-core-add-ignore-flag-to-netdev_adjacent-structur.patch
new file mode 100644
index 0000000000..804c9a4257
--- /dev/null
+++ b/patches.suse/net-core-add-ignore-flag-to-netdev_adjacent-structur.patch
@@ -0,0 +1,446 @@
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Mon, 21 Oct 2019 18:47:56 +0000
+Subject: net: core: add ignore flag to netdev_adjacent structure
+Patch-mainline: v5.4-rc6
+Git-commit: 32b6d34fedc2229cdf6a047fdbc0704085441915
+References: bsc#1154353
+
+netdev_upper_dev_link() is used to link an adjacent node, and
+netdev_upper_dev_unlink() is used to unlink one.
+The unlink operation cannot fail, but the link operation can.
+
+In order to exchange adjacent nodes, the old adjacent node has to be
+unlinked first, and the new adjacent node linked afterwards.
+If that link operation fails, the old adjacent node has to be linked
+again. But this link operation can fail too, which eventually leaves
+the adjacency relationship broken.
+
+This patch adds an ignore flag to the netdev_adjacent structure.
+If this flag is set, netdev_upper_dev_link() temporarily ignores the
+old adjacent node.
+
+This patch also adds new helper functions for other modules:
+netdev_adjacent_change_prepare()
+netdev_adjacent_change_commit()
+netdev_adjacent_change_abort()
+
+netdev_adjacent_change_prepare() inserts the new device into the
+adjacent list, but the new device may not be used yet.
+If netdev_adjacent_change_prepare() fails, it internally rolls back
+the adjacent list, so no further action is needed.
+netdev_adjacent_change_commit() deletes the old device from the
+adjacent list and makes the new device usable.
+netdev_adjacent_change_abort() rolls back the adjacent list.
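+
+A minimal usage sketch of the new helpers (do_change_active() is a
+hypothetical stand-in for the caller's own work, e.g. a bonding or
+team active-device change):
+
+	err = netdev_adjacent_change_prepare(old_dev, new_dev, dev, extack);
+	if (err)
+		return err;	/* prepare already rolled the list back */
+
+	err = do_change_active(dev, new_dev);	/* hypothetical step */
+	if (err) {
+		netdev_adjacent_change_abort(old_dev, new_dev, dev);
+		return err;
+	}
+
+	netdev_adjacent_change_commit(old_dev, new_dev, dev);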
+
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/linux/netdevice.h | 10 ++
+ net/core/dev.c | 230 +++++++++++++++++++++++++++++++++++++++++-----
+ 2 files changed, 219 insertions(+), 21 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -4323,6 +4323,16 @@ int netdev_master_upper_dev_link(struct
+ struct netlink_ext_ack *extack);
+ void netdev_upper_dev_unlink(struct net_device *dev,
+ struct net_device *upper_dev);
++int netdev_adjacent_change_prepare(struct net_device *old_dev,
++ struct net_device *new_dev,
++ struct net_device *dev,
++ struct netlink_ext_ack *extack);
++void netdev_adjacent_change_commit(struct net_device *old_dev,
++ struct net_device *new_dev,
++ struct net_device *dev);
++void netdev_adjacent_change_abort(struct net_device *old_dev,
++ struct net_device *new_dev,
++ struct net_device *dev);
+ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
+ void *netdev_lower_dev_get_private(struct net_device *dev,
+ struct net_device *lower_dev);
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6408,6 +6408,9 @@ struct netdev_adjacent {
+ /* upper master flag, there can only be one master device per list */
+ bool master;
+
++ /* lookup ignore flag */
++ bool ignore;
++
+ /* counter for the number of times this device was added to us */
+ u16 ref_nr;
+
+@@ -6430,7 +6433,7 @@ static struct netdev_adjacent *__netdev_
+ return NULL;
+ }
+
+-static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
++static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
+ {
+ struct net_device *dev = data;
+
+@@ -6451,7 +6454,7 @@ bool netdev_has_upper_dev(struct net_dev
+ {
+ ASSERT_RTNL();
+
+- return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
++ return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
+ upper_dev);
+ }
+ EXPORT_SYMBOL(netdev_has_upper_dev);
+@@ -6469,7 +6472,7 @@ EXPORT_SYMBOL(netdev_has_upper_dev);
+ bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
+ struct net_device *upper_dev)
+ {
+- return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
++ return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
+ upper_dev);
+ }
+ EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
+@@ -6513,6 +6516,22 @@ struct net_device *netdev_master_upper_d
+ }
+ EXPORT_SYMBOL(netdev_master_upper_dev_get);
+
++static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
++{
++ struct netdev_adjacent *upper;
++
++ ASSERT_RTNL();
++
++ if (list_empty(&dev->adj_list.upper))
++ return NULL;
++
++ upper = list_first_entry(&dev->adj_list.upper,
++ struct netdev_adjacent, list);
++ if (likely(upper->master) && !upper->ignore)
++ return upper->dev;
++ return NULL;
++}
++
+ /**
+ * netdev_has_any_lower_dev - Check if device is linked to some device
+ * @dev: device
+@@ -6563,8 +6582,9 @@ struct net_device *netdev_upper_get_next
+ }
+ EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
+-static struct net_device *netdev_next_upper_dev(struct net_device *dev,
+- struct list_head **iter)
++static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
++ struct list_head **iter,
++ bool *ignore)
+ {
+ struct netdev_adjacent *upper;
+
+@@ -6574,6 +6594,7 @@ static struct net_device *netdev_next_up
+ return NULL;
+
+ *iter = &upper->list;
++ *ignore = upper->ignore;
+
+ return upper->dev;
+ }
+@@ -6595,14 +6616,15 @@ static struct net_device *netdev_next_up
+ return upper->dev;
+ }
+
+-static int netdev_walk_all_upper_dev(struct net_device *dev,
+- int (*fn)(struct net_device *dev,
+- void *data),
+- void *data)
++static int __netdev_walk_all_upper_dev(struct net_device *dev,
++ int (*fn)(struct net_device *dev,
++ void *data),
++ void *data)
+ {
+ struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+ int ret, cur = 0;
++ bool ignore;
+
+ now = dev;
+ iter = &dev->adj_list.upper;
+@@ -6616,9 +6638,11 @@ static int netdev_walk_all_upper_dev(str
+
+ next = NULL;
+ while (1) {
+- udev = netdev_next_upper_dev(now, &iter);
++ udev = __netdev_next_upper_dev(now, &iter, &ignore);
+ if (!udev)
+ break;
++ if (ignore)
++ continue;
+
+ next = udev;
+ niter = &udev->adj_list.upper;
+@@ -6688,6 +6712,15 @@ int netdev_walk_all_upper_dev_rcu(struct
+ }
+ EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
+
++static bool __netdev_has_upper_dev(struct net_device *dev,
++ struct net_device *upper_dev)
++{
++ ASSERT_RTNL();
++
++ return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
++ upper_dev);
++}
++
+ /**
+ * netdev_lower_get_next_private - Get the next ->private from the
+ * lower neighbour list
+@@ -6784,6 +6817,23 @@ static struct net_device *netdev_next_lo
+ return lower->dev;
+ }
+
++static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
++ struct list_head **iter,
++ bool *ignore)
++{
++ struct netdev_adjacent *lower;
++
++ lower = list_entry((*iter)->next, struct netdev_adjacent, list);
++
++ if (&lower->list == &dev->adj_list.lower)
++ return NULL;
++
++ *iter = &lower->list;
++ *ignore = lower->ignore;
++
++ return lower->dev;
++}
++
+ int netdev_walk_all_lower_dev(struct net_device *dev,
+ int (*fn)(struct net_device *dev,
+ void *data),
+@@ -6831,6 +6881,55 @@ int netdev_walk_all_lower_dev(struct net
+ }
+ EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
+
++static int __netdev_walk_all_lower_dev(struct net_device *dev,
++ int (*fn)(struct net_device *dev,
++ void *data),
++ void *data)
++{
++ struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
++ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
++ int ret, cur = 0;
++ bool ignore;
++
++ now = dev;
++ iter = &dev->adj_list.lower;
++
++ while (1) {
++ if (now != dev) {
++ ret = fn(now, data);
++ if (ret)
++ return ret;
++ }
++
++ next = NULL;
++ while (1) {
++ ldev = __netdev_next_lower_dev(now, &iter, &ignore);
++ if (!ldev)
++ break;
++ if (ignore)
++ continue;
++
++ next = ldev;
++ niter = &ldev->adj_list.lower;
++ dev_stack[cur] = now;
++ iter_stack[cur++] = iter;
++ break;
++ }
++
++ if (!next) {
++ if (!cur)
++ return 0;
++ next = dev_stack[--cur];
++ niter = iter_stack[cur];
++ }
++
++ now = next;
++ iter = niter;
++ }
++
++ return 0;
++}
++
+ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
+ struct list_head **iter)
+ {
+@@ -6850,11 +6949,14 @@ static u8 __netdev_upper_depth(struct ne
+ struct net_device *udev;
+ struct list_head *iter;
+ u8 max_depth = 0;
++ bool ignore;
+
+ for (iter = &dev->adj_list.upper,
+- udev = netdev_next_upper_dev(dev, &iter);
++ udev = __netdev_next_upper_dev(dev, &iter, &ignore);
+ udev;
+- udev = netdev_next_upper_dev(dev, &iter)) {
++ udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
++ if (ignore)
++ continue;
+ if (max_depth < udev->upper_level)
+ max_depth = udev->upper_level;
+ }
+@@ -6867,11 +6969,14 @@ static u8 __netdev_lower_depth(struct ne
+ struct net_device *ldev;
+ struct list_head *iter;
+ u8 max_depth = 0;
++ bool ignore;
+
+ for (iter = &dev->adj_list.lower,
+- ldev = netdev_next_lower_dev(dev, &iter);
++ ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
+ ldev;
+- ldev = netdev_next_lower_dev(dev, &iter)) {
++ ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
++ if (ignore)
++ continue;
+ if (max_depth < ldev->lower_level)
+ max_depth = ldev->lower_level;
+ }
+@@ -7035,6 +7140,7 @@ static int __netdev_adjacent_dev_insert(
+ adj->master = master;
+ adj->ref_nr = 1;
+ adj->private = private;
++ adj->ignore = false;
+ dev_hold(adj_dev);
+
+ pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
+@@ -7185,17 +7291,17 @@ static int __netdev_upper_dev_link(struc
+ return -EBUSY;
+
+ /* To prevent loops, check if dev is not upper device to upper_dev. */
+- if (netdev_has_upper_dev(upper_dev, dev))
++ if (__netdev_has_upper_dev(upper_dev, dev))
+ return -EBUSY;
+
+ if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
+ return -EMLINK;
+
+ if (!master) {
+- if (netdev_has_upper_dev(dev, upper_dev))
++ if (__netdev_has_upper_dev(dev, upper_dev))
+ return -EEXIST;
+ } else {
+- master_dev = netdev_master_upper_dev_get(dev);
++ master_dev = __netdev_master_upper_dev_get(dev);
+ if (master_dev)
+ return master_dev == upper_dev ? -EEXIST : -EBUSY;
+ }
+@@ -7218,10 +7324,11 @@ static int __netdev_upper_dev_link(struc
+ goto rollback;
+
+ __netdev_update_upper_level(dev, NULL);
+- netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
++ __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+ __netdev_update_lower_level(upper_dev, NULL);
+- netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, NULL);
++ __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
++ NULL);
+
+ return 0;
+
+@@ -7307,13 +7414,94 @@ void netdev_upper_dev_unlink(struct net_
+ &changeupper_info.info);
+
+ __netdev_update_upper_level(dev, NULL);
+- netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
++ __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+ __netdev_update_lower_level(upper_dev, NULL);
+- netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, NULL);
++ __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
++ NULL);
+ }
+ EXPORT_SYMBOL(netdev_upper_dev_unlink);
+
++static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
++ struct net_device *lower_dev,
++ bool val)
++{
++ struct netdev_adjacent *adj;
++
++ adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
++ if (adj)
++ adj->ignore = val;
++
++ adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
++ if (adj)
++ adj->ignore = val;
++}
++
++static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
++ struct net_device *lower_dev)
++{
++ __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
++}
++
++static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
++ struct net_device *lower_dev)
++{
++ __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
++}
++
++int netdev_adjacent_change_prepare(struct net_device *old_dev,
++ struct net_device *new_dev,
++ struct net_device *dev,
++ struct netlink_ext_ack *extack)
++{
++ int err;
++
++ if (!new_dev)
++ return 0;
++
++ if (old_dev && new_dev != old_dev)
++ netdev_adjacent_dev_disable(dev, old_dev);
++
++ err = netdev_upper_dev_link(new_dev, dev, extack);
++ if (err) {
++ if (old_dev && new_dev != old_dev)
++ netdev_adjacent_dev_enable(dev, old_dev);
++ return err;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(netdev_adjacent_change_prepare);
++
++void netdev_adjacent_change_commit(struct net_device *old_dev,
++ struct net_device *new_dev,
++ struct net_device *dev)
++{
++ if (!new_dev || !old_dev)
++ return;
++
++ if (new_dev == old_dev)
++ return;
++
++ netdev_adjacent_dev_enable(dev, old_dev);
++ netdev_upper_dev_unlink(old_dev, dev);
++}
++EXPORT_SYMBOL(netdev_adjacent_change_commit);
++
++void netdev_adjacent_change_abort(struct net_device *old_dev,
++ struct net_device *new_dev,
++ struct net_device *dev)
++{
++ if (!new_dev)
++ return;
++
++ if (old_dev && new_dev != old_dev)
++ netdev_adjacent_dev_enable(dev, old_dev);
++
++ netdev_upper_dev_unlink(new_dev, dev);
++}
++EXPORT_SYMBOL(netdev_adjacent_change_abort);
++
+ /**
+ * netdev_bonding_info_change - Dispatch event about slave change
+ * @dev: device
diff --git a/patches.suse/net-core-limit-nested-device-depth.patch b/patches.suse/net-core-limit-nested-device-depth.patch
new file mode 100644
index 0000000000..94a5d55117
--- /dev/null
+++ b/patches.suse/net-core-limit-nested-device-depth.patch
@@ -0,0 +1,448 @@
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Mon, 21 Oct 2019 18:47:50 +0000
+Subject: net: core: limit nested device depth
+Patch-mainline: v5.4-rc6
+Git-commit: 5343da4c17429efaa5fb1594ea96aee1a283e694
+References: bsc#1154353
+
+The current code doesn't limit the number of nested devices.
+Nested devices are handled recursively, which needs a lot of stack
+memory, so unlimited nesting can overflow the stack.
+
+This patch adds upper_level and lower_level fields, which represent
+the maximum upper/lower nesting depth of a device.
+When an upper/lower device is attached or detached,
+{lower/upper}_level is updated, and if the maximum depth becomes
+bigger than 8 (MAX_NEST_DEV), the attach routine fails and returns
+-EMLINK.
+
+In addition, this patch converts the recursive
+netdev_walk_all_{lower/upper} routines into iterative ones, as
+sketched below.
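+
+Condensed, commented sketch of the iterative pattern shared by the
+walkers below (fn/data callback plumbing and RCU variants as in the
+patch):
+
+	now = dev;
+	iter = &dev->adj_list.upper;
+	while (1) {
+		if (now != dev) {
+			ret = fn(now, data);	/* run the callback */
+			if (ret)
+				return ret;
+		}
+		udev = netdev_next_upper_dev(now, &iter);
+		if (udev) {
+			/* descend: save the position on an explicit
+			 * stack bounded by MAX_NEST_DEV, instead of
+			 * recursing
+			 */
+			dev_stack[cur] = now;
+			iter_stack[cur++] = iter;
+			now = udev;
+			iter = &udev->adj_list.upper;
+		} else {
+			if (!cur)
+				return 0;	/* stack empty: done */
+			now = dev_stack[--cur];	/* backtrack */
+			iter = iter_stack[cur];
+		}
+	}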
+
+Test commands:
+ ip link add dummy0 type dummy
+ ip link add link dummy0 name vlan1 type vlan id 1
+ ip link set vlan1 up
+
+ for i in {2..55}
+ do
+ let A=$i-1
+
+ ip link add vlan$i link vlan$A type vlan id $i
+ done
+ ip link del dummy0
+
+Splat looks like:
+[ 155.513226][ T908] BUG: KASAN: use-after-free in __unwind_start+0x71/0x850
+[ 155.514162][ T908] Write of size 88 at addr ffff8880608a6cc0 by task ip/908
+[ 155.515048][ T908]
+[ 155.515333][ T908] CPU: 0 PID: 908 Comm: ip Not tainted 5.4.0-rc3+ #96
+[ 155.516147][ T908] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
+[ 155.517233][ T908] Call Trace:
+[ 155.517627][ T908]
+[ 155.517918][ T908] Allocated by task 0:
+[ 155.518412][ T908] (stack is not available)
+[ 155.518955][ T908]
+[ 155.519228][ T908] Freed by task 0:
+[ 155.519885][ T908] (stack is not available)
+[ 155.520452][ T908]
+[ 155.520729][ T908] The buggy address belongs to the object at ffff8880608a6ac0
+[ 155.520729][ T908] which belongs to the cache names_cache of size 4096
+[ 155.522387][ T908] The buggy address is located 512 bytes inside of
+[ 155.522387][ T908] 4096-byte region [ffff8880608a6ac0, ffff8880608a7ac0)
+[ 155.523920][ T908] The buggy address belongs to the page:
+[ 155.524552][ T908] page:ffffea0001822800 refcount:1 mapcount:0 mapping:ffff88806c657cc0 index:0x0 compound_mapcount:0
+[ 155.525836][ T908] flags: 0x100000000010200(slab|head)
+[ 155.526445][ T908] raw: 0100000000010200 ffffea0001813808 ffffea0001a26c08 ffff88806c657cc0
+[ 155.527424][ T908] raw: 0000000000000000 0000000000070007 00000001ffffffff 0000000000000000
+[ 155.528429][ T908] page dumped because: kasan: bad access detected
+[ 155.529158][ T908]
+[ 155.529410][ T908] Memory state around the buggy address:
+[ 155.530060][ T908] ffff8880608a6b80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 155.530971][ T908] ffff8880608a6c00: fb fb fb fb fb f1 f1 f1 f1 00 f2 f2 f2 f3 f3 f3
+[ 155.531889][ T908] >ffff8880608a6c80: f3 fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 155.532806][ T908] ^
+[ 155.533509][ T908] ffff8880608a6d00: fb fb fb fb fb fb fb fb fb f1 f1 f1 f1 00 00 00
+[ 155.534436][ T908] ffff8880608a6d80: f2 f3 f3 f3 f3 fb fb fb 00 00 00 00 00 00 00 00
+[ ... ]
+
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/linux/netdevice.h | 4
+ net/core/dev.c | 272 ++++++++++++++++++++++++++++++++++++++--------
+ 2 files changed, 231 insertions(+), 45 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1648,6 +1648,8 @@ enum netdev_priv_flags {
+ * @perm_addr: Permanent hw address
+ * @addr_assign_type: Hw address assignment type
+ * @addr_len: Hardware address length
++ * @upper_level: Maximum depth level of upper devices.
++ * @lower_level: Maximum depth level of lower devices.
+ * @neigh_priv_len: Used in neigh_alloc()
+ * @dev_id: Used to differentiate devices that share
+ * the same link layer address
+@@ -1874,6 +1876,8 @@ struct net_device {
+ unsigned char perm_addr[MAX_ADDR_LEN];
+ unsigned char addr_assign_type;
+ unsigned char addr_len;
++ unsigned char upper_level;
++ unsigned char lower_level;
+ unsigned short neigh_priv_len;
+ unsigned short dev_id;
+ unsigned short dev_port;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -146,6 +146,7 @@
+ #include "net-sysfs.h"
+
+ #define MAX_GRO_SKBS 8
++#define MAX_NEST_DEV 8
+
+ /* This should be increased if a protocol with a bigger head is added. */
+ #define GRO_MAX_HEAD (MAX_HEADER + 128)
+@@ -6644,6 +6645,21 @@ struct net_device *netdev_upper_get_next
+ }
+ EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
++static struct net_device *netdev_next_upper_dev(struct net_device *dev,
++ struct list_head **iter)
++{
++ struct netdev_adjacent *upper;
++
++ upper = list_entry((*iter)->next, struct netdev_adjacent, list);
++
++ if (&upper->list == &dev->adj_list.upper)
++ return NULL;
++
++ *iter = &upper->list;
++
++ return upper->dev;
++}
++
+ static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
+ struct list_head **iter)
+ {
+@@ -6661,28 +6677,93 @@ static struct net_device *netdev_next_up
+ return upper->dev;
+ }
+
++static int netdev_walk_all_upper_dev(struct net_device *dev,
++ int (*fn)(struct net_device *dev,
++ void *data),
++ void *data)
++{
++ struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
++ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
++ int ret, cur = 0;
++
++ now = dev;
++ iter = &dev->adj_list.upper;
++
++ while (1) {
++ if (now != dev) {
++ ret = fn(now, data);
++ if (ret)
++ return ret;
++ }
++
++ next = NULL;
++ while (1) {
++ udev = netdev_next_upper_dev(now, &iter);
++ if (!udev)
++ break;
++
++ next = udev;
++ niter = &udev->adj_list.upper;
++ dev_stack[cur] = now;
++ iter_stack[cur++] = iter;
++ break;
++ }
++
++ if (!next) {
++ if (!cur)
++ return 0;
++ next = dev_stack[--cur];
++ niter = iter_stack[cur];
++ }
++
++ now = next;
++ iter = niter;
++ }
++
++ return 0;
++}
++
+ int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
+ int (*fn)(struct net_device *dev,
+ void *data),
+ void *data)
+ {
+- struct net_device *udev;
+- struct list_head *iter;
+- int ret;
++ struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
++ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
++ int ret, cur = 0;
+
+- for (iter = &dev->adj_list.upper,
+- udev = netdev_next_upper_dev_rcu(dev, &iter);
+- udev;
+- udev = netdev_next_upper_dev_rcu(dev, &iter)) {
+- /* first is the upper device itself */
+- ret = fn(udev, data);
+- if (ret)
+- return ret;
++ now = dev;
++ iter = &dev->adj_list.upper;
+
+- /* then look at all of its upper devices */
+- ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
+- if (ret)
+- return ret;
++ while (1) {
++ if (now != dev) {
++ ret = fn(now, data);
++ if (ret)
++ return ret;
++ }
++
++ next = NULL;
++ while (1) {
++ udev = netdev_next_upper_dev_rcu(now, &iter);
++ if (!udev)
++ break;
++
++ next = udev;
++ niter = &udev->adj_list.upper;
++ dev_stack[cur] = now;
++ iter_stack[cur++] = iter;
++ break;
++ }
++
++ if (!next) {
++ if (!cur)
++ return 0;
++ next = dev_stack[--cur];
++ niter = iter_stack[cur];
++ }
++
++ now = next;
++ iter = niter;
+ }
+
+ return 0;
+@@ -6790,23 +6871,42 @@ int netdev_walk_all_lower_dev(struct net
+ void *data),
+ void *data)
+ {
+- struct net_device *ldev;
+- struct list_head *iter;
+- int ret;
++ struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
++ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
++ int ret, cur = 0;
+
+- for (iter = &dev->adj_list.lower,
+- ldev = netdev_next_lower_dev(dev, &iter);
+- ldev;
+- ldev = netdev_next_lower_dev(dev, &iter)) {
+- /* first is the lower device itself */
+- ret = fn(ldev, data);
+- if (ret)
+- return ret;
++ now = dev;
++ iter = &dev->adj_list.lower;
+
+- /* then look at all of its lower devices */
+- ret = netdev_walk_all_lower_dev(ldev, fn, data);
+- if (ret)
+- return ret;
++ while (1) {
++ if (now != dev) {
++ ret = fn(now, data);
++ if (ret)
++ return ret;
++ }
++
++ next = NULL;
++ while (1) {
++ ldev = netdev_next_lower_dev(now, &iter);
++ if (!ldev)
++ break;
++
++ next = ldev;
++ niter = &ldev->adj_list.lower;
++ dev_stack[cur] = now;
++ iter_stack[cur++] = iter;
++ break;
++ }
++
++ if (!next) {
++ if (!cur)
++ return 0;
++ next = dev_stack[--cur];
++ niter = iter_stack[cur];
++ }
++
++ now = next;
++ iter = niter;
+ }
+
+ return 0;
+@@ -6827,28 +6927,93 @@ static struct net_device *netdev_next_lo
+ return lower->dev;
+ }
+
+-int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
+- int (*fn)(struct net_device *dev,
+- void *data),
+- void *data)
++static u8 __netdev_upper_depth(struct net_device *dev)
++{
++ struct net_device *udev;
++ struct list_head *iter;
++ u8 max_depth = 0;
++
++ for (iter = &dev->adj_list.upper,
++ udev = netdev_next_upper_dev(dev, &iter);
++ udev;
++ udev = netdev_next_upper_dev(dev, &iter)) {
++ if (max_depth < udev->upper_level)
++ max_depth = udev->upper_level;
++ }
++
++ return max_depth;
++}
++
++static u8 __netdev_lower_depth(struct net_device *dev)
+ {
+ struct net_device *ldev;
+ struct list_head *iter;
+- int ret;
++ u8 max_depth = 0;
+
+ for (iter = &dev->adj_list.lower,
+- ldev = netdev_next_lower_dev_rcu(dev, &iter);
++ ldev = netdev_next_lower_dev(dev, &iter);
+ ldev;
+- ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
+- /* first is the lower device itself */
+- ret = fn(ldev, data);
+- if (ret)
+- return ret;
++ ldev = netdev_next_lower_dev(dev, &iter)) {
++ if (max_depth < ldev->lower_level)
++ max_depth = ldev->lower_level;
++ }
+
+- /* then look at all of its lower devices */
+- ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
+- if (ret)
+- return ret;
++ return max_depth;
++}
++
++static int __netdev_update_upper_level(struct net_device *dev, void *data)
++{
++ dev->upper_level = __netdev_upper_depth(dev) + 1;
++ return 0;
++}
++
++static int __netdev_update_lower_level(struct net_device *dev, void *data)
++{
++ dev->lower_level = __netdev_lower_depth(dev) + 1;
++ return 0;
++}
++
++int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
++ int (*fn)(struct net_device *dev,
++ void *data),
++ void *data)
++{
++ struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
++ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
++ int ret, cur = 0;
++
++ now = dev;
++ iter = &dev->adj_list.lower;
++
++ while (1) {
++ if (now != dev) {
++ ret = fn(now, data);
++ if (ret)
++ return ret;
++ }
++
++ next = NULL;
++ while (1) {
++ ldev = netdev_next_lower_dev_rcu(now, &iter);
++ if (!ldev)
++ break;
++
++ next = ldev;
++ niter = &ldev->adj_list.lower;
++ dev_stack[cur] = now;
++ iter_stack[cur++] = iter;
++ break;
++ }
++
++ if (!next) {
++ if (!cur)
++ return 0;
++ next = dev_stack[--cur];
++ niter = iter_stack[cur];
++ }
++
++ now = next;
++ iter = niter;
+ }
+
+ return 0;
+@@ -7105,6 +7270,9 @@ static int __netdev_upper_dev_link(struc
+ if (netdev_has_upper_dev(upper_dev, dev))
+ return -EBUSY;
+
++ if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
++ return -EMLINK;
++
+ if (!master) {
+ if (netdev_has_upper_dev(dev, upper_dev))
+ return -EEXIST;
+@@ -7131,6 +7299,12 @@ static int __netdev_upper_dev_link(struc
+ if (ret)
+ goto rollback;
+
++ __netdev_update_upper_level(dev, NULL);
++ netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
++
++ __netdev_update_lower_level(upper_dev, NULL);
++ netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, NULL);
++
+ return 0;
+
+ rollback:
+@@ -7213,6 +7387,12 @@ void netdev_upper_dev_unlink(struct net_
+
+ call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
+ &changeupper_info.info);
++
++ __netdev_update_upper_level(dev, NULL);
++ netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
++
++ __netdev_update_lower_level(upper_dev, NULL);
++ netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level, NULL);
+ }
+ EXPORT_SYMBOL(netdev_upper_dev_unlink);
+
+@@ -9212,6 +9392,8 @@ struct net_device *alloc_netdev_mqs(int
+
+ dev->gso_max_size = GSO_MAX_SIZE;
+ dev->gso_max_segs = GSO_MAX_SEGS;
++ dev->upper_level = 1;
++ dev->lower_level = 1;
+
+ INIT_LIST_HEAD(&dev->napi_list);
+ INIT_LIST_HEAD(&dev->unreg_list);
diff --git a/patches.suse/net-ensure-correct-skb-tstamp-in-various-fragmenters.patch b/patches.suse/net-ensure-correct-skb-tstamp-in-various-fragmenters.patch
new file mode 100644
index 0000000000..f5a3948a72
--- /dev/null
+++ b/patches.suse/net-ensure-correct-skb-tstamp-in-various-fragmenters.patch
@@ -0,0 +1,138 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 16 Oct 2019 18:00:56 -0700
+Subject: net: ensure correct skb->tstamp in various fragmenters
+Patch-mainline: v5.4-rc4
+Git-commit: 9669fffc1415bb0c30e5d2ec98a8e1c3a418cb9c
+References: bsc#1154353
+
+Thomas found that some forwarded packets would be stuck
+in the FQ packet scheduler because their skb->tstamp contained
+timestamps far in the future.
+
+We thought we addressed this point in commit 8203e2d844d3
+("net: clear skb->tstamp in forwarding paths") but there
+is still an issue when/if a packet needs to be fragmented.
+
+In order to meet EDT requirements, we have to make sure all
+fragments get the original skb->tstamp.
+
+Note that this original skb->tstamp should be zero in
+forwarding path, but might have a non zero value in
+output path if user decided so.
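+
+A minimal sketch of the pattern each fragmenter below applies; the
+helper make_next_fragment() is hypothetical, standing in for the
+protocol-specific fraglist/slow-path iteration:
+
+	ktime_t tstamp = skb->tstamp;	/* latch before fragmenting */
+	struct sk_buff *frag;
+	int err;
+
+	while ((frag = make_next_fragment(skb)) != NULL) {
+		frag->tstamp = tstamp;	/* each fragment keeps the EDT */
+		err = output(net, sk, frag);
+		if (err)
+			break;
+	}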
+
+Fixes: fb420d5d91c1 ("tcp/fq: move back to CLOCK_MONOTONIC")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Thomas Bartschies <Thomas.Bartschies@cvk.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/bridge/netfilter/nf_conntrack_bridge.c | 3 +++
+ net/ipv4/ip_output.c | 3 +++
+ net/ipv6/ip6_output.c | 3 +++
+ net/ipv6/netfilter.c | 3 +++
+ 4 files changed, 12 insertions(+)
+
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -34,6 +34,7 @@ static int nf_br_ip_fragment(struct net
+ {
+ int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
+ unsigned int hlen, ll_rs, mtu;
++ ktime_t tstamp = skb->tstamp;
+ struct ip_frag_state state;
+ struct iphdr *iph;
+ int err;
+@@ -81,6 +82,7 @@ static int nf_br_ip_fragment(struct net
+ if (iter.frag)
+ ip_fraglist_prepare(skb, &iter);
+
++ skb->tstamp = tstamp;
+ err = output(net, sk, data, skb);
+ if (err || !iter.frag)
+ break;
+@@ -105,6 +107,7 @@ slow_path:
+ goto blackhole;
+ }
+
++ skb2->tstamp = tstamp;
+ err = output(net, sk, data, skb2);
+ if (err)
+ goto blackhole;
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -771,6 +771,7 @@ int ip_do_fragment(struct net *net, stru
+ struct rtable *rt = skb_rtable(skb);
+ unsigned int mtu, hlen, ll_rs;
+ struct ip_fraglist_iter iter;
++ ktime_t tstamp = skb->tstamp;
+ struct ip_frag_state state;
+ int err = 0;
+
+@@ -846,6 +847,7 @@ int ip_do_fragment(struct net *net, stru
+ ip_fraglist_prepare(skb, &iter);
+ }
+
++ skb->tstamp = tstamp;
+ err = output(net, sk, skb);
+
+ if (!err)
+@@ -900,6 +902,7 @@ slow_path:
+ /*
+ * Put this fragment into the sending queue.
+ */
++ skb2->tstamp = tstamp;
+ err = output(net, sk, skb2);
+ if (err)
+ goto fail;
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -768,6 +768,7 @@ int ip6_fragment(struct net *net, struct
+ inet6_sk(skb->sk) : NULL;
+ struct ip6_frag_state state;
+ unsigned int mtu, hlen, nexthdr_offset;
++ ktime_t tstamp = skb->tstamp;
+ int hroom, err = 0;
+ __be32 frag_id;
+ u8 *prevhdr, nexthdr = 0;
+@@ -855,6 +856,7 @@ int ip6_fragment(struct net *net, struct
+ if (iter.frag)
+ ip6_fraglist_prepare(skb, &iter);
+
++ skb->tstamp = tstamp;
+ err = output(net, sk, skb);
+ if (!err)
+ IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
+@@ -913,6 +915,7 @@ slow_path:
+ /*
+ * Put this fragment into the sending queue.
+ */
++ frag->tstamp = tstamp;
+ err = output(net, sk, frag);
+ if (err)
+ goto fail;
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -119,6 +119,7 @@ int br_ip6_fragment(struct net *net, str
+ struct sk_buff *))
+ {
+ int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
++ ktime_t tstamp = skb->tstamp;
+ struct ip6_frag_state state;
+ u8 *prevhdr, nexthdr = 0;
+ unsigned int mtu, hlen;
+@@ -183,6 +184,7 @@ int br_ip6_fragment(struct net *net, str
+ if (iter.frag)
+ ip6_fraglist_prepare(skb, &iter);
+
++ skb->tstamp = tstamp;
+ err = output(net, sk, data, skb);
+ if (err || !iter.frag)
+ break;
+@@ -215,6 +217,7 @@ slow_path:
+ goto blackhole;
+ }
+
++ skb2->tstamp = tstamp;
+ err = output(net, sk, data, skb2);
+ if (err)
+ goto blackhole;
diff --git a/patches.suse/net-fix-installing-orphaned-programs.patch b/patches.suse/net-fix-installing-orphaned-programs.patch
new file mode 100644
index 0000000000..2515c37f5b
--- /dev/null
+++ b/patches.suse/net-fix-installing-orphaned-programs.patch
@@ -0,0 +1,46 @@
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Thu, 31 Oct 2019 20:07:00 -0700
+Subject: net: fix installing orphaned programs
+Patch-mainline: v5.4-rc6
+Git-commit: aefc3e723a78c2e429a64dadd7815ef2a4aecd44
+References: bsc#1154353
+
+When a netdevice with offloaded BPF programs is destroyed,
+the programs are orphaned and removed from the program
+IDA - their IDs get released (the programs may remain
+accessible via existing open file descriptors and pinned
+files). After the IDs are released they are set to 0.
+
+This confuses dev_change_xdp_fd() because it compares
+the __dev_xdp_query() result, where 0 means no program,
+with prog->aux->id, where 0 means orphaned.
+
+dev_change_xdp_fd() would have incorrectly returned success
+even though it had not installed the program.
+
+Since drivers already catch this case via bpf_offload_dev_match(),
+let them handle it. The error message drivers produce in
+this case ("program loaded for a different device") is in fact
+correct, as the orphaned program must have been loaded for a
+different device.
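+
+A minimal sketch of the failure mode and of the guard (names as in the
+hunk below; prog_id is what __dev_xdp_query() reported):
+
+	/* an orphaned prog reads back id == 0, and "no program installed"
+	 * is also reported as 0, so the old check matched 0 == 0
+	 */
+	if (prog->aux->id && prog->aux->id == prog_id) {
+		bpf_prog_put(prog);	/* same program already installed */
+		return 0;
+	}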
+
+Fixes: c14a9f633d9e ("net: Don't call XDP_SETUP_PROG when nothing is changed")
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/core/dev.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8421,7 +8421,8 @@ int dev_change_xdp_fd(struct net_device
+ return -EINVAL;
+ }
+
+- if (prog->aux->id == prog_id) {
++ /* prog->aux->id may be 0 for orphaned device-bound progs */
++ if (prog->aux->id && prog->aux->id == prog_id) {
+ bpf_prog_put(prog);
+ return 0;
+ }
diff --git a/patches.suse/net-fix-sk_page_frag-recursion-from-memory-reclaim.patch b/patches.suse/net-fix-sk_page_frag-recursion-from-memory-reclaim.patch
new file mode 100644
index 0000000000..42feaf4f5a
--- /dev/null
+++ b/patches.suse/net-fix-sk_page_frag-recursion-from-memory-reclaim.patch
@@ -0,0 +1,158 @@
+From: Tejun Heo <tj@kernel.org>
+Date: Thu, 24 Oct 2019 13:50:27 -0700
+Subject: net: fix sk_page_frag() recursion from memory reclaim
+Patch-mainline: v5.4-rc6
+Git-commit: 20eb4f29b60286e0d6dc01d9c260b4bd383c58fb
+References: bsc#1154353
+
+sk_page_frag() optimizes skb_frag allocations by using per-task
+skb_frag cache when it knows it's the only user. The condition is
+determined by seeing whether the socket allocation mask allows
+blocking - if the allocation may block, it obviously owns the task's
+context and ergo exclusively owns current->task_frag.
+
+Unfortunately, this misses recursion through memory reclaim path.
+Please take a look at the following backtrace.
+
+ [2] RIP: 0010:tcp_sendmsg_locked+0xccf/0xe10
+ ...
+ tcp_sendmsg+0x27/0x40
+ sock_sendmsg+0x30/0x40
+ sock_xmit.isra.24+0xa1/0x170 [nbd]
+ nbd_send_cmd+0x1d2/0x690 [nbd]
+ nbd_queue_rq+0x1b5/0x3b0 [nbd]
+ __blk_mq_try_issue_directly+0x108/0x1b0
+ blk_mq_request_issue_directly+0xbd/0xe0
+ blk_mq_try_issue_list_directly+0x41/0xb0
+ blk_mq_sched_insert_requests+0xa2/0xe0
+ blk_mq_flush_plug_list+0x205/0x2a0
+ blk_flush_plug_list+0xc3/0xf0
+ [1] blk_finish_plug+0x21/0x2e
+ _xfs_buf_ioapply+0x313/0x460
+ __xfs_buf_submit+0x67/0x220
+ xfs_buf_read_map+0x113/0x1a0
+ xfs_trans_read_buf_map+0xbf/0x330
+ xfs_btree_read_buf_block.constprop.42+0x95/0xd0
+ xfs_btree_lookup_get_block+0x95/0x170
+ xfs_btree_lookup+0xcc/0x470
+ xfs_bmap_del_extent_real+0x254/0x9a0
+ __xfs_bunmapi+0x45c/0xab0
+ xfs_bunmapi+0x15/0x30
+ xfs_itruncate_extents_flags+0xca/0x250
+ xfs_free_eofblocks+0x181/0x1e0
+ xfs_fs_destroy_inode+0xa8/0x1b0
+ destroy_inode+0x38/0x70
+ dispose_list+0x35/0x50
+ prune_icache_sb+0x52/0x70
+ super_cache_scan+0x120/0x1a0
+ do_shrink_slab+0x120/0x290
+ shrink_slab+0x216/0x2b0
+ shrink_node+0x1b6/0x4a0
+ do_try_to_free_pages+0xc6/0x370
+ try_to_free_mem_cgroup_pages+0xe3/0x1e0
+ try_charge+0x29e/0x790
+ mem_cgroup_charge_skmem+0x6a/0x100
+ __sk_mem_raise_allocated+0x18e/0x390
+ __sk_mem_schedule+0x2a/0x40
+ [0] tcp_sendmsg_locked+0x8eb/0xe10
+ tcp_sendmsg+0x27/0x40
+ sock_sendmsg+0x30/0x40
+ ___sys_sendmsg+0x26d/0x2b0
+ __sys_sendmsg+0x57/0xa0
+ do_syscall_64+0x42/0x100
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+In [0], tcp_sendmsg_locked() was using current->task_frag when it
+called sk_wmem_schedule(). It had already calculated how many bytes
+could fit into current->task_frag. Due to memory pressure,
+sk_wmem_schedule() called into the memory reclaim path which called
+into xfs and then the IO issue path. Because the filesystem in
+question is backed by nbd, control goes back into the tcp layer -
+back into tcp_sendmsg_locked().
+
+nbd sets sk_allocation to (GFP_NOIO | __GFP_MEMALLOC), which makes
+sense - it's in the process of freeing memory and wants to be able
+to, e.g., drop clean pages to make forward progress. However, this
+confused sk_page_frag() called from [2]. Because it only tests
+whether the allocation allows blocking, which it does, it now thinks
+current->task_frag can be used again although it was already being
+used in [0].
+
+After [2] used current->task_frag, the offset would be increased by
+the used amount. When control returns to [0],
+current->task_frag's offset has been increased and the previously
+calculated number of bytes may now overrun the end of the allocated
+memory, leading to silent memory corruption.
+
+Fix it by adding gfpflags_normal_context(), which tests sleepable &&
+!reclaim, and using it to determine whether to use current->task_frag.
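+
+A sketch of the test the new helper performs (the real helper,
+gfpflags_normal_context(), is added in the hunk below):
+
+	/* may sleep AND is not itself a memory-reclaim allocation */
+	return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
+		__GFP_DIRECT_RECLAIM;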
+
+v2: Eric didn't like gfp flags being tested twice. Introduce a new
+ helper gfpflags_normal_context() and combine the two tests.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Josef Bacik <josef@toxicpanda.com>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/linux/gfp.h | 23 +++++++++++++++++++++++
+ include/net/sock.h | 11 ++++++++---
+ 2 files changed, 31 insertions(+), 3 deletions(-)
+
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -325,6 +325,29 @@ static inline bool gfpflags_allow_blocki
+ return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
+ }
+
++/**
++ * gfpflags_normal_context - is gfp_flags a normal sleepable context?
++ * @gfp_flags: gfp_flags to test
++ *
++ * Test whether @gfp_flags indicates that the allocation is from the
++ * %current context and allowed to sleep.
++ *
++ * An allocation being allowed to block doesn't mean it owns the %current
++ * context. When direct reclaim path tries to allocate memory, the
++ * allocation context is nested inside whatever %current was doing at the
++ * time of the original allocation. The nested allocation may be allowed
++ * to block but modifying anything %current owns can corrupt the outer
++ * context's expectations.
++ *
++ * %true result from this function indicates that the allocation context
++ * can sleep and use anything that's associated with %current.
++ */
++static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
++{
++ return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
++ __GFP_DIRECT_RECLAIM;
++}
++
+ #ifdef CONFIG_HIGHMEM
+ #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
+ #else
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2242,12 +2242,17 @@ struct sk_buff *sk_stream_alloc_skb(stru
+ * sk_page_frag - return an appropriate page_frag
+ * @sk: socket
+ *
+- * If socket allocation mode allows current thread to sleep, it means its
+- * safe to use the per task page_frag instead of the per socket one.
++ * Use the per task page_frag instead of the per socket one for
++ * optimization when we know that we're in the normal context and owns
++ * everything that's associated with %current.
++ *
++ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
++ * inside other socket operations and end up recursing into sk_page_frag()
++ * while it's already in use.
+ */
+ static inline struct page_frag *sk_page_frag(struct sock *sk)
+ {
+- if (gfpflags_allow_blocking(sk->sk_allocation))
++ if (gfpflags_normal_context(sk->sk_allocation))
+ return &current->task_frag;
+
+ return &sk->sk_frag;
diff --git a/patches.suse/net-flow_dissector-switch-to-siphash.patch b/patches.suse/net-flow_dissector-switch-to-siphash.patch
new file mode 100644
index 0000000000..2a4a5ace14
--- /dev/null
+++ b/patches.suse/net-flow_dissector-switch-to-siphash.patch
@@ -0,0 +1,374 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 22 Oct 2019 07:57:46 -0700
+Subject: net/flow_dissector: switch to siphash
+Patch-mainline: v5.4-rc6
+Git-commit: 55667441c84fa5e0911a0aac44fb059c15ba6da2
+References: bsc#1154353
+
+UDP IPv6 packet auto flowlabels are generated with a 32-bit secret
+(static u32 hashrnd in net/core/flow_dissector.c) by
+applying jhash() over fields known by the receivers.
+
+Attackers can easily infer the 32-bit secret and use this information
+to identify a device and/or user, since this 32-bit secret is only
+set at boot time.
+
+Really, using jhash() to generate cookies sent on the wire
+is a serious security concern.
+
+Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be
+a dead end. Trying to periodically change the secret (like in sch_sfq.c)
+could change paths taken in the network for long lived flows.
+
+Let's switch to siphash, as we did in commit df453700e8d8
+("inet: switch IP ID generator to siphash")
+
+Using a cryptographically strong pseudo random function will solve this
+privacy issue and more generally remove other weak points in the stack.
+
+Packet schedulers using skb_get_hash_perturb() benefit from this change.
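+
+Conceptually, the change swaps an easily inverted hash for a keyed PRF
+over the same dissected flow keys; a sketch mirroring the hunks below,
+with a key drawn once at boot:
+
+	static siphash_key_t hashrnd __read_mostly;
+
+	net_get_random_once(&hashrnd, sizeof(hashrnd));	/* secret key */
+	hash = siphash(flow_keys_hash_start(keys),
+		       flow_keys_hash_length(keys), &hashrnd);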
+
+Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default")
+Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels")
+Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel")
+Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Jonathan Berger <jonathann1@walla.com>
+Reported-by: Amit Klein <aksecurity@gmail.com>
+Reported-by: Benny Pinkas <benny@pinkas.net>
+Cc: Tom Herbert <tom@herbertland.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/linux/skbuff.h | 3 ++-
+ include/net/flow_dissector.h | 3 ++-
+ include/net/fq.h | 2 +-
+ include/net/fq_impl.h | 4 ++--
+ net/core/flow_dissector.c | 38 ++++++++++++++++----------------------
+ net/sched/sch_hhf.c | 8 ++++----
+ net/sched/sch_sfb.c | 13 +++++++------
+ net/sched/sch_sfq.c | 14 ++++++++------
+ 8 files changed, 42 insertions(+), 43 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1348,7 +1348,8 @@ static inline __u32 skb_get_hash_flowi6(
+ return skb->hash;
+ }
+
+-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
++__u32 skb_get_hash_perturb(const struct sk_buff *skb,
++ const siphash_key_t *perturb);
+
+ static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+ {
+--- a/include/net/flow_dissector.h
++++ b/include/net/flow_dissector.h
+@@ -4,6 +4,7 @@
+
+ #include <linux/types.h>
+ #include <linux/in6.h>
++#include <linux/siphash.h>
+ #include <uapi/linux/if_ether.h>
+
+ /**
+@@ -276,7 +277,7 @@ struct flow_keys_basic {
+ struct flow_keys {
+ struct flow_dissector_key_control control;
+ #define FLOW_KEYS_HASH_START_FIELD basic
+- struct flow_dissector_key_basic basic;
++ struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
+ struct flow_dissector_key_tags tags;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
+--- a/include/net/fq.h
++++ b/include/net/fq.h
+@@ -69,7 +69,7 @@ struct fq {
+ struct list_head backlogs;
+ spinlock_t lock;
+ u32 flows_cnt;
+- u32 perturbation;
++ siphash_key_t perturbation;
+ u32 limit;
+ u32 memory_limit;
+ u32 memory_usage;
+--- a/include/net/fq_impl.h
++++ b/include/net/fq_impl.h
+@@ -108,7 +108,7 @@ begin:
+
+ static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
+ {
+- u32 hash = skb_get_hash_perturb(skb, fq->perturbation);
++ u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
+
+ return reciprocal_scale(hash, fq->flows_cnt);
+ }
+@@ -308,7 +308,7 @@ static int fq_init(struct fq *fq, int fl
+ INIT_LIST_HEAD(&fq->backlogs);
+ spin_lock_init(&fq->lock);
+ fq->flows_cnt = max_t(u32, flows_cnt, 1);
+- fq->perturbation = prandom_u32();
++ get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
+ fq->quantum = 300;
+ fq->limit = 8192;
+ fq->memory_limit = 16 << 20; /* 16 MBytes */
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1350,30 +1350,21 @@ out_bad:
+ }
+ EXPORT_SYMBOL(__skb_flow_dissect);
+
+-static u32 hashrnd __read_mostly;
++static siphash_key_t hashrnd __read_mostly;
+ static __always_inline void __flow_hash_secret_init(void)
+ {
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+ }
+
+-static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
+- u32 keyval)
++static const void *flow_keys_hash_start(const struct flow_keys *flow)
+ {
+- return jhash2(words, length, keyval);
+-}
+-
+-static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
+-{
+- const void *p = flow;
+-
+- BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
+- return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
++ BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
++ return &flow->FLOW_KEYS_HASH_START_FIELD;
+ }
+
+ static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
+ {
+ size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
+- BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
+ BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
+ sizeof(*flow) - sizeof(flow->addrs));
+
+@@ -1388,7 +1379,7 @@ static inline size_t flow_keys_hash_leng
+ diff -= sizeof(flow->addrs.tipckey);
+ break;
+ }
+- return (sizeof(*flow) - diff) / sizeof(u32);
++ return sizeof(*flow) - diff;
+ }
+
+ __be32 flow_get_u32_src(const struct flow_keys *flow)
+@@ -1454,14 +1445,15 @@ static inline void __flow_hash_consisten
+ }
+ }
+
+-static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
++static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
++ const siphash_key_t *keyval)
+ {
+ u32 hash;
+
+ __flow_hash_consistentify(keys);
+
+- hash = __flow_hash_words(flow_keys_hash_start(keys),
+- flow_keys_hash_length(keys), keyval);
++ hash = siphash(flow_keys_hash_start(keys),
++ flow_keys_hash_length(keys), keyval);
+ if (!hash)
+ hash = 1;
+
+@@ -1471,12 +1463,13 @@ static inline u32 __flow_hash_from_keys(
+ u32 flow_hash_from_keys(struct flow_keys *keys)
+ {
+ __flow_hash_secret_init();
+- return __flow_hash_from_keys(keys, hashrnd);
++ return __flow_hash_from_keys(keys, &hashrnd);
+ }
+ EXPORT_SYMBOL(flow_hash_from_keys);
+
+ static inline u32 ___skb_get_hash(const struct sk_buff *skb,
+- struct flow_keys *keys, u32 keyval)
++ struct flow_keys *keys,
++ const siphash_key_t *keyval)
+ {
+ skb_flow_dissect_flow_keys(skb, keys,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+@@ -1524,7 +1517,7 @@ u32 __skb_get_hash_symmetric(const struc
+ &keys, NULL, 0, 0, 0,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+- return __flow_hash_from_keys(&keys, hashrnd);
++ return __flow_hash_from_keys(&keys, &hashrnd);
+ }
+ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+
+@@ -1544,13 +1537,14 @@ void __skb_get_hash(struct sk_buff *skb)
+
+ __flow_hash_secret_init();
+
+- hash = ___skb_get_hash(skb, &keys, hashrnd);
++ hash = ___skb_get_hash(skb, &keys, &hashrnd);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+ EXPORT_SYMBOL(__skb_get_hash);
+
+-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
++__u32 skb_get_hash_perturb(const struct sk_buff *skb,
++ const siphash_key_t *perturb)
+ {
+ struct flow_keys keys;
+
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -5,11 +5,11 @@
+ * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
+ */
+
+-#include <linux/jhash.h>
+ #include <linux/jiffies.h>
+ #include <linux/module.h>
+ #include <linux/skbuff.h>
+ #include <linux/vmalloc.h>
++#include <linux/siphash.h>
+ #include <net/pkt_sched.h>
+ #include <net/sock.h>
+
+@@ -126,7 +126,7 @@ struct wdrr_bucket {
+
+ struct hhf_sched_data {
+ struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
+- u32 perturbation; /* hash perturbation */
++ siphash_key_t perturbation; /* hash perturbation */
+ u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
+ u32 drop_overlimit; /* number of times max qdisc packet
+ * limit was hit
+@@ -264,7 +264,7 @@ static enum wdrr_bucket_idx hhf_classify
+ }
+
+ /* Get hashed flow-id of the skb. */
+- hash = skb_get_hash_perturb(skb, q->perturbation);
++ hash = skb_get_hash_perturb(skb, &q->perturbation);
+
+ /* Check if this packet belongs to an already established HH flow. */
+ flow_pos = hash & HHF_BIT_MASK;
+@@ -582,7 +582,7 @@ static int hhf_init(struct Qdisc *sch, s
+
+ sch->limit = 1000;
+ q->quantum = psched_mtu(qdisc_dev(sch));
+- q->perturbation = prandom_u32();
++ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
+ INIT_LIST_HEAD(&q->new_buckets);
+ INIT_LIST_HEAD(&q->old_buckets);
+
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -18,7 +18,7 @@
+ #include <linux/errno.h>
+ #include <linux/skbuff.h>
+ #include <linux/random.h>
+-#include <linux/jhash.h>
++#include <linux/siphash.h>
+ #include <net/ip.h>
+ #include <net/pkt_sched.h>
+ #include <net/pkt_cls.h>
+@@ -45,7 +45,7 @@ struct sfb_bucket {
+ * (Section 4.4 of SFB reference : moving hash functions)
+ */
+ struct sfb_bins {
+- u32 perturbation; /* jhash perturbation */
++ siphash_key_t perturbation; /* siphash key */
+ struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
+ };
+
+@@ -217,7 +217,8 @@ static u32 sfb_compute_qlen(u32 *prob_r,
+
+ static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
+ {
+- q->bins[slot].perturbation = prandom_u32();
++ get_random_bytes(&q->bins[slot].perturbation,
++ sizeof(q->bins[slot].perturbation));
+ }
+
+ static void sfb_swap_slot(struct sfb_sched_data *q)
+@@ -314,9 +315,9 @@ static int sfb_enqueue(struct sk_buff *s
+ /* If using external classifiers, get result and record it. */
+ if (!sfb_classify(skb, fl, &ret, &salt))
+ goto other_drop;
+- sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
++ sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
+ } else {
+- sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
++ sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
+ }
+
+
+@@ -352,7 +353,7 @@ static int sfb_enqueue(struct sk_buff *s
+ /* Inelastic flow */
+ if (q->double_buffering) {
+ sfbhash = skb_get_hash_perturb(skb,
+- q->bins[slot].perturbation);
++ &q->bins[slot].perturbation);
+ if (!sfbhash)
+ sfbhash = 1;
+ sfb_skb_cb(skb)->hashes[slot] = sfbhash;
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -14,7 +14,7 @@
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/skbuff.h>
+-#include <linux/jhash.h>
++#include <linux/siphash.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <net/netlink.h>
+@@ -117,7 +117,7 @@ struct sfq_sched_data {
+ u8 headdrop;
+ u8 maxdepth; /* limit of packets per flow */
+
+- u32 perturbation;
++ siphash_key_t perturbation;
+ u8 cur_depth; /* depth of longest slot */
+ u8 flags;
+ unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
+@@ -157,7 +157,7 @@ static inline struct sfq_head *sfq_dep_h
+ static unsigned int sfq_hash(const struct sfq_sched_data *q,
+ const struct sk_buff *skb)
+ {
+- return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
++ return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
+ }
+
+ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
+@@ -607,9 +607,11 @@ static void sfq_perturbation(struct time
+ struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
+ struct Qdisc *sch = q->sch;
+ spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
++ siphash_key_t nkey;
+
++ get_random_bytes(&nkey, sizeof(nkey));
+ spin_lock(root_lock);
+- q->perturbation = prandom_u32();
++ q->perturbation = nkey;
+ if (!q->filter_list && q->tail)
+ sfq_rehash(sch);
+ spin_unlock(root_lock);
+@@ -688,7 +690,7 @@ static int sfq_change(struct Qdisc *sch,
+ del_timer(&q->perturb_timer);
+ if (q->perturb_period) {
+ mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+- q->perturbation = prandom_u32();
++ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
+ }
+ sch_tree_unlock(sch);
+ kfree(p);
+@@ -745,7 +747,7 @@ static int sfq_init(struct Qdisc *sch, s
+ q->quantum = psched_mtu(qdisc_dev(sch));
+ q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+ q->perturb_period = 0;
+- q->perturbation = prandom_u32();
++ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
+
+ if (opt) {
+ int err = sfq_change(sch, opt);
diff --git a/patches.suse/net-hisilicon-Fix-Trying-to-free-already-free-IRQ.patch b/patches.suse/net-hisilicon-Fix-Trying-to-free-already-free-IRQ.patch
new file mode 100644
index 0000000000..7359278910
--- /dev/null
+++ b/patches.suse/net-hisilicon-Fix-Trying-to-free-already-free-IRQ.patch
@@ -0,0 +1,52 @@
+From: Jiangfeng Xiao <xiaojiangfeng@huawei.com>
+Date: Fri, 25 Oct 2019 21:48:22 +0800
+Subject: net: hisilicon: Fix "Trying to free already-free IRQ"
+Patch-mainline: v5.4-rc6
+Git-commit: 63a41746827cb16dc6ad0d4d761ab4e7dda7a0c3
+References: bsc#1154353
+
+When running "rmmod hip04_eth.ko", we can get the following warning:
+
+Task track: rmmod(1623)>bash(1591)>login(1581)>init(1)
+------------[ cut here ]------------
+WARNING: CPU: 0 PID: 1623 at kernel/irq/manage.c:1557 __free_irq+0xa4/0x2ac()
+Trying to free already-free IRQ 200
+Modules linked in: ping(O) pramdisk(O) cpuinfo(O) rtos_snapshot(O) interrupt_ctrl(O) mtdblock mtd_blkdevrtfs nfs_acl nfs lockd grace sunrpc xt_tcpudp ipt_REJECT iptable_filter ip_tables x_tables nf_reject_ipv
+CPU: 0 PID: 1623 Comm: rmmod Tainted: G O 4.4.193 #1
+Hardware name: Hisilicon A15
+[<c020b408>] (rtos_unwind_backtrace) from [<c0206624>] (show_stack+0x10/0x14)
+[<c0206624>] (show_stack) from [<c03f2be4>] (dump_stack+0xa0/0xd8)
+[<c03f2be4>] (dump_stack) from [<c021a780>] (warn_slowpath_common+0x84/0xb0)
+[<c021a780>] (warn_slowpath_common) from [<c021a7e8>] (warn_slowpath_fmt+0x3c/0x68)
+[<c021a7e8>] (warn_slowpath_fmt) from [<c026876c>] (__free_irq+0xa4/0x2ac)
+[<c026876c>] (__free_irq) from [<c0268a14>] (free_irq+0x60/0x7c)
+[<c0268a14>] (free_irq) from [<c0469e80>] (release_nodes+0x1c4/0x1ec)
+[<c0469e80>] (release_nodes) from [<c0466924>] (__device_release_driver+0xa8/0x104)
+[<c0466924>] (__device_release_driver) from [<c0466a80>] (driver_detach+0xd0/0xf8)
+[<c0466a80>] (driver_detach) from [<c0465e18>] (bus_remove_driver+0x64/0x8c)
+[<c0465e18>] (bus_remove_driver) from [<c02935b0>] (SyS_delete_module+0x198/0x1e0)
+[<c02935b0>] (SyS_delete_module) from [<c0202ed0>] (__sys_trace_return+0x0/0x10)
+---[ end trace bb25d6123d849b44 ]---
+
+Currently "rmmod hip04_eth.ko" call free_irq more than once
+as devres_release_all and hip04_remove both call free_irq.
+This results in a 'Trying to free already-free IRQ' warning.
+To solve the problem free_irq has been moved out of hip04_remove.
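+
+A sketch of the ownership rule, assuming the probe path requests the
+IRQ with a device-managed call (as this driver does):
+
+	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
+			       0, pdev->name, ndev);
+	/* devres_release_all() frees this IRQ on driver detach, so
+	 * .remove() must not call free_irq() on it a second time.
+	 */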
+
+Signed-off-by: Jiangfeng Xiao <xiaojiangfeng@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hip04_eth.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
+@@ -1038,7 +1038,6 @@ static int hip04_remove(struct platform_
+
+ hip04_free_ring(ndev, d);
+ unregister_netdev(ndev);
+- free_irq(ndev->irq, ndev);
+ of_node_put(priv->phy_node);
+ cancel_work_sync(&priv->tx_timeout_task);
+ free_netdev(ndev);
diff --git a/patches.suse/net-hisilicon-Fix-ping-latency-when-deal-with-high-t.patch b/patches.suse/net-hisilicon-Fix-ping-latency-when-deal-with-high-t.patch
new file mode 100644
index 0000000000..093b5fef06
--- /dev/null
+++ b/patches.suse/net-hisilicon-Fix-ping-latency-when-deal-with-high-t.patch
@@ -0,0 +1,74 @@
+From: Jiangfeng Xiao <xiaojiangfeng@huawei.com>
+Date: Mon, 28 Oct 2019 13:09:46 +0800
+Subject: net: hisilicon: Fix ping latency when deal with high throughput
+Patch-mainline: v5.4-rc6
+Git-commit: e56bd641ca61beb92b135298d5046905f920b734
+References: bsc#1154353
+
+This is due to an error in the over-budget processing.
+When dealing with high throughput, used buffers that
+exceed the budget are not cleaned up. In addition,
+it takes many cycles to clean up those used buffers
+before the buffers holding valid data can take effect.
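+
+A sketch of the fixed accounting; process_one_rx_desc() is a
+hypothetical stand-in for the descriptor handling in the hunk below:
+
+	/* carry the unprocessed count in the device state across polls */
+	priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+	while (priv->rx_cnt_remaining && rx < budget) {
+		process_one_rx_desc(priv);
+		rx++;
+		if (--priv->rx_cnt_remaining == 0)
+			priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+	}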
+
+Signed-off-by: Jiangfeng Xiao <xiaojiangfeng@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hip04_eth.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
+@@ -237,6 +237,7 @@ struct hip04_priv {
+ dma_addr_t rx_phys[RX_DESC_NUM];
+ unsigned int rx_head;
+ unsigned int rx_buf_size;
++ unsigned int rx_cnt_remaining;
+
+ struct device_node *phy_node;
+ struct phy_device *phy;
+@@ -575,7 +576,6 @@ static int hip04_rx_poll(struct napi_str
+ struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+- unsigned int cnt = hip04_recv_cnt(priv);
+ struct rx_desc *desc;
+ struct sk_buff *skb;
+ unsigned char *buf;
+@@ -588,8 +588,8 @@ static int hip04_rx_poll(struct napi_str
+
+ /* clean up tx descriptors */
+ tx_remaining = hip04_tx_reclaim(ndev, false);
+-
+- while (cnt && !last) {
++ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
++ while (priv->rx_cnt_remaining && !last) {
+ buf = priv->rx_buf[priv->rx_head];
+ skb = build_skb(buf, priv->rx_buf_size);
+ if (unlikely(!skb)) {
+@@ -635,11 +635,13 @@ refill:
+ hip04_set_recv_desc(priv, phys);
+
+ priv->rx_head = RX_NEXT(priv->rx_head);
+- if (rx >= budget)
++ if (rx >= budget) {
++ --priv->rx_cnt_remaining;
+ goto done;
++ }
+
+- if (--cnt == 0)
+- cnt = hip04_recv_cnt(priv);
++ if (--priv->rx_cnt_remaining == 0)
++ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+ }
+
+ if (!(priv->reg_inten & RCV_INT)) {
+@@ -724,6 +726,7 @@ static int hip04_mac_open(struct net_dev
+ int i;
+
+ priv->rx_head = 0;
++ priv->rx_cnt_remaining = 0;
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ hip04_reset_ppe(priv);
diff --git a/patches.suse/net-hns3-fix-mis-counting-IRQ-vector-numbers-issue.patch b/patches.suse/net-hns3-fix-mis-counting-IRQ-vector-numbers-issue.patch
new file mode 100644
index 0000000000..004d455c6d
--- /dev/null
+++ b/patches.suse/net-hns3-fix-mis-counting-IRQ-vector-numbers-issue.patch
@@ -0,0 +1,233 @@
+From: Yonglong Liu <liuyonglong@huawei.com>
+Date: Fri, 18 Oct 2019 11:42:59 +0800
+Subject: net: hns3: fix mis-counting IRQ vector numbers issue
+Patch-mainline: v5.4-rc4
+Git-commit: 580a05f9d4ada3bfb689140d0efec1efdb8a48da
+References: bsc#1154353
+
+Currently, num_msi_left means the number of vectors for the NIC,
+but if the PF supports RoCE, it also contains the vector
+numbers of RoCE (not expected).
+
+This may cause interrupts to be lost in some cases, because the
+NIC module used vector resources which belong to RoCE.
+
+This patch adds a new variable num_nic_msi to store the vector
+number of the NIC, and adjusts the default TQP number and rss_size
+according to the value of num_nic_msi.
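+
+The core of the adjustment is a clamp that preserves a one-to-one
+irq/queue mapping, taken from the hunks below (one vector stays
+reserved for misc interrupts):
+
+	kinfo->rss_size = min_t(u16, kinfo->rss_size,
+				(hdev->num_nic_msi - 1) /
+				hdev->tm_info.num_tc);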
+
+Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support")
+Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 21 ++++++++++
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 11 ++++-
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 28 ++++++++++++--
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1
+ 6 files changed, 58 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -32,6 +32,8 @@
+
+ #define HNAE3_MOD_VERSION "1.0"
+
++#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
++
+ /* Device IDs */
+ #define HNAE3_DEV_ID_GE 0xA220
+ #define HNAE3_DEV_ID_25GE 0xA221
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -906,6 +906,9 @@ static int hclge_query_pf_resource(struc
+ hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
++ /* nic's msix numbers is always equals to the roce's. */
++ hdev->num_nic_msi = hdev->num_roce_msi;
++
+ /* PF should have NIC vectors and Roce vectors,
+ * NIC vectors are queued before Roce vectors.
+ */
+@@ -915,6 +918,15 @@ static int hclge_query_pf_resource(struc
+ hdev->num_msi =
+ hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
++
++ hdev->num_nic_msi = hdev->num_msi;
++ }
++
++ if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
++ dev_err(&hdev->pdev->dev,
++ "Just %u msi resources, not enough for pf(min:2).\n",
++ hdev->num_nic_msi);
++ return -EINVAL;
+ }
+
+ return 0;
+@@ -1507,6 +1519,10 @@ static int hclge_assign_tqp(struct hclg
+ kinfo->rss_size = min_t(u16, hdev->rss_size_max,
+ vport->alloc_tqps / hdev->tm_info.num_tc);
+
++ /* ensure one to one mapping between irq and queue at default */
++ kinfo->rss_size = min_t(u16, kinfo->rss_size,
++ (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
++
+ return 0;
+ }
+
+@@ -2285,7 +2301,8 @@ static int hclge_init_msi(struct hclge_d
+ int vectors;
+ int i;
+
+- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
++ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
++ hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+@@ -2300,6 +2317,7 @@ static int hclge_init_msi(struct hclge_d
+
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
++
+ hdev->base_msi_vector = pdev->irq;
+ hdev->roce_base_vector = hdev->base_msi_vector +
+ hdev->roce_base_msix_offset;
+@@ -3903,6 +3921,7 @@ static int hclge_get_vector(struct hnae3
+ int alloc = 0;
+ int i, j;
+
++ vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
+ vector_num = min(hdev->num_msi_left, vector_num);
+
+ for (j = 0; j < vector_num; j++) {
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -763,6 +763,7 @@ struct hclge_dev {
+ u32 base_msi_vector;
+ u16 *vector_status;
+ int *vector_irq;
++ u16 num_nic_msi; /* Num of nic vectors for this PF */
+ u16 num_roce_msi; /* Num of roce vectors for this PF */
+ int roce_base_vector;
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -537,9 +537,16 @@ static void hclge_tm_vport_tc_info_updat
+ kinfo->rss_size = kinfo->req_rss_size;
+ } else if (kinfo->rss_size > max_rss_size ||
+ (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
++ /* if user not set rss, the rss_size should compare with the
++ * valid msi numbers to ensure one to one map between tqp and
++ * irq as default.
++ */
++ if (!kinfo->req_rss_size)
++ max_rss_size = min_t(u16, max_rss_size,
++ (hdev->num_nic_msi - 1) /
++ kinfo->num_tc);
++
+ /* Set to the maximum specification value (max_rss_size). */
+- dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+- kinfo->rss_size, max_rss_size);
+ kinfo->rss_size = max_rss_size;
+ }
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -411,6 +411,13 @@ static int hclgevf_knic_setup(struct hcl
+ kinfo->tqp[i] = &hdev->htqp[i].q;
+ }
+
++ /* after init the max rss_size and tqps, adjust the default tqp numbers
++ * and rss size with the actual vector numbers
++ */
++ kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
++ kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
++ kinfo->rss_size);
++
+ return 0;
+ }
+
+@@ -502,6 +509,7 @@ static int hclgevf_get_vector(struct hna
+ int alloc = 0;
+ int i, j;
+
++ vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
+ vector_num = min(hdev->num_msi_left, vector_num);
+
+ for (j = 0; j < vector_num; j++) {
+@@ -2246,13 +2254,14 @@ static int hclgevf_init_msi(struct hclge
+ int vectors;
+ int i;
+
+- if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
++ if (hnae3_dev_roce_supported(hdev))
+ vectors = pci_alloc_irq_vectors(pdev,
+ hdev->roce_base_msix_offset + 1,
+ hdev->num_msi,
+ PCI_IRQ_MSIX);
+ else
+- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
++ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
++ hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+
+ if (vectors < 0) {
+@@ -2268,6 +2277,7 @@ static int hclgevf_init_msi(struct hclge
+
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
++
+ hdev->base_msi_vector = pdev->irq;
+ hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
+
+@@ -2533,7 +2543,7 @@ static int hclgevf_query_vf_resource(str
+
+ req = (struct hclgevf_query_res_cmd *)desc.data;
+
+- if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
++ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGEVF_MSIX_OFT_ROCEE_M,
+@@ -2542,6 +2552,9 @@ static int hclgevf_query_vf_resource(str
+ hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
++ /* nic's msix numbers is always equals to the roce's. */
++ hdev->num_nic_msix = hdev->num_roce_msix;
++
+ /* VF should have NIC vectors and Roce vectors, NIC vectors
+ * are queued before Roce vectors. The offset is fixed to 64.
+ */
+@@ -2551,6 +2564,15 @@ static int hclgevf_query_vf_resource(str
+ hdev->num_msi =
+ hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
++
++ hdev->num_nic_msix = hdev->num_msi;
++ }
++
++ if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
++ dev_err(&hdev->pdev->dev,
++ "Just %u msi resources, not enough for vf(min:2).\n",
++ hdev->num_nic_msix);
++ return -EINVAL;
+ }
+
+ return 0;
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -270,6 +270,7 @@ struct hclgevf_dev {
+ u16 num_msi;
+ u16 num_msi_left;
+ u16 num_msi_used;
++ u16 num_nic_msix; /* Num of nic vectors for this VF */
+ u16 num_roce_msix; /* Num of roce vectors for this VF */
+ u16 roce_base_msix_offset;
+ int roce_base_vector;
diff --git a/patches.suse/net-mlx4_core-Dynamically-set-guaranteed-amount-of-c.patch b/patches.suse/net-mlx4_core-Dynamically-set-guaranteed-amount-of-c.patch
new file mode 100644
index 0000000000..49a052b884
--- /dev/null
+++ b/patches.suse/net-mlx4_core-Dynamically-set-guaranteed-amount-of-c.patch
@@ -0,0 +1,92 @@
+From: Eran Ben Elisha <eranbe@mellanox.com>
+Date: Sun, 27 Oct 2019 16:39:15 +0200
+Subject: net/mlx4_core: Dynamically set guaranteed amount of counters per VF
+Patch-mainline: v5.4-rc6
+Git-commit: e19868efea0c103f23b4b7e986fd0a703822111f
+References: jsc#SLE-8460
+
+Prior to this patch, the number of counters guaranteed per VF in the
+resource tracker was MLX4_VF_COUNTERS_PER_PORT * MLX4_MAX_PORTS. It
+was set regardless of whether the VF was single or dual port.
+This caused several VFs to have no guaranteed counters although the
+system could satisfy their request.
+
+The fix is to dynamically guarantee counters, based on each VF's
+specification.
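+
+A sketch of the per-VF computation, mirroring the new helper below:
+
+	actv_ports = mlx4_get_active_ports(dev, vf);
+	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+	counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
+	/* '-1' keeps the sink counter reserved */
+	if (res_alloc->res_reserved + counters_guaranteed >
+	    dev->caps.max_counters - 1)
+		counters_guaranteed = 0;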
+
+Fixes: 9de92c60beaa ("net/mlx4_core: Adjust counter grant policy in the resource tracker")
+Signed-off-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 42 +++++++++++-------
+ 1 file changed, 26 insertions(+), 16 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -471,12 +471,31 @@ void mlx4_init_quotas(struct mlx4_dev *d
+ priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
+ }
+
+-static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
++static int
++mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
++ struct resource_allocator *res_alloc,
++ int vf)
+ {
+- /* reduce the sink counter */
+- return (dev->caps.max_counters - 1 -
+- (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
+- / MLX4_MAX_PORTS;
++ struct mlx4_active_ports actv_ports;
++ int ports, counters_guaranteed;
++
++ /* For master, only allocate according to the number of phys ports */
++ if (vf == mlx4_master_func_num(dev))
++ return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
++
++ /* calculate real number of ports for the VF */
++ actv_ports = mlx4_get_active_ports(dev, vf);
++ ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
++ counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
++
++ /* If we do not have enough counters for this VF, do not
++ * allocate any for it. '-1' to reduce the sink counter.
++ */
++ if ((res_alloc->res_reserved + counters_guaranteed) >
++ (dev->caps.max_counters - 1))
++ return 0;
++
++ return counters_guaranteed;
+ }
+
+ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+@@ -484,7 +503,6 @@ int mlx4_init_resource_tracker(struct ml
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, j;
+ int t;
+- int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
+
+ priv->mfunc.master.res_tracker.slave_list =
+ kcalloc(dev->num_slaves, sizeof(struct slave_list),
+@@ -603,16 +621,8 @@ int mlx4_init_resource_tracker(struct ml
+ break;
+ case RES_COUNTER:
+ res_alloc->quota[t] = dev->caps.max_counters;
+- if (t == mlx4_master_func_num(dev))
+- res_alloc->guaranteed[t] =
+- MLX4_PF_COUNTERS_PER_PORT *
+- MLX4_MAX_PORTS;
+- else if (t <= max_vfs_guarantee_counter)
+- res_alloc->guaranteed[t] =
+- MLX4_VF_COUNTERS_PER_PORT *
+- MLX4_MAX_PORTS;
+- else
+- res_alloc->guaranteed[t] = 0;
++ res_alloc->guaranteed[t] =
++ mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
+ break;
+ default:
+ break;
diff --git a/patches.suse/net-mlx5-DR-Allow-insertion-of-duplicate-rules.patch b/patches.suse/net-mlx5-DR-Allow-insertion-of-duplicate-rules.patch
new file mode 100644
index 0000000000..fdfb8f0291
--- /dev/null
+++ b/patches.suse/net-mlx5-DR-Allow-insertion-of-duplicate-rules.patch
@@ -0,0 +1,42 @@
+From: Alex Vesker <valex@mellanox.com>
+Date: Mon, 7 Oct 2019 16:13:25 +0300
+Subject: net/mlx5: DR, Allow insertion of duplicate rules
+Patch-mainline: v5.4-rc4
+Git-commit: 0041412694eca70387aee4076254fbed8222700a
+References: jsc#SLE-8464
+
+Duplicate rules were not allowed to be configured with SW steering.
+This restriction caused failures with the replace-rule logic done by
+upper layers.
+
+This fix allows multiple rules with the same match values; in
+such a case the first inserted rule will match.
+
+Fixes: 41d07074154c ("net/mlx5: DR, Expose steering rule functionality")
+Signed-off-by: Alex Vesker <valex@mellanox.com>
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+@@ -788,12 +788,10 @@ again:
+ * it means that all the previous stes are the same,
+ * if so, this rule is duplicated.
+ */
+- if (mlx5dr_ste_is_last_in_rule(nic_matcher,
+- matched_ste->ste_chain_location)) {
+- mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n");
+- return NULL;
+- }
+- return matched_ste;
++ if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
++ return matched_ste;
++
++ mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
+ }
+
+ if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
diff --git a/patches.suse/net-mlx5-Fix-NULL-pointer-dereference-in-extended-de.patch b/patches.suse/net-mlx5-Fix-NULL-pointer-dereference-in-extended-de.patch
new file mode 100644
index 0000000000..710397976f
--- /dev/null
+++ b/patches.suse/net-mlx5-Fix-NULL-pointer-dereference-in-extended-de.patch
@@ -0,0 +1,32 @@
+From: Eli Britstein <elibr@mellanox.com>
+Date: Tue, 15 Oct 2019 12:44:18 +0000
+Subject: net/mlx5: Fix NULL pointer dereference in extended destination
+Patch-mainline: v5.4-rc6
+Git-commit: 0fd79b1e17bec8460039f6bdb57163a0442110d9
+References: jsc#SLE-8464
+
+The cited commit refactored the encap id into a struct pointed to from
+the destination.
+This fixes a NULL pointer dereference for the case where there is no
+encap for one of the destinations.
+
+Fixes: 2b688ea5efde ("net/mlx5: Add flow steering actions to fs_cmd shim layer")
+Signed-off-by: Eli Britstein <elibr@mellanox.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+@@ -507,7 +507,8 @@ static int mlx5_cmd_set_fte(struct mlx5_
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dst->dest_attr.vport.vhca_id);
+- if (extended_dest) {
++ if (extended_dest &&
++ dst->dest_attr.vport.pkt_reformat) {
+ MLX5_SET(dest_format_struct, in_dests,
+ packet_reformat,
+ !!(dst->dest_attr.vport.flags &
diff --git a/patches.suse/net-mlx5-Fix-rtable-reference-leak.patch b/patches.suse/net-mlx5-Fix-rtable-reference-leak.patch
new file mode 100644
index 0000000000..c50f2c10ef
--- /dev/null
+++ b/patches.suse/net-mlx5-Fix-rtable-reference-leak.patch
@@ -0,0 +1,56 @@
+From: Parav Pandit <parav@mellanox.com>
+Date: Thu, 19 Sep 2019 15:58:14 -0500
+Subject: net/mlx5: Fix rtable reference leak
+Patch-mainline: v5.4-rc6
+Git-commit: 2347cee83b2bd868bde2d283db0fac89f22be4e0
+References: jsc#SLE-8464
+
+If the rt entry gateway family is not AF_INET for a multipath device,
+the rtable reference is leaked.
+Hence, fix it by releasing the reference.
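+
+The rule the fix enforces, sketched: the lookup takes a reference that
+every early-exit path must drop:
+
+	if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
+		ip_rt_put(rt);		/* was missing */
+		return -ENETUNREACH;
+	}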
+
+Fixes: 5fb091e8130b ("net/mlx5e: Use hint to resolve route when in HW multipath mode")
+Fixes: e32ee6c78efa ("net/mlx5e: Support tunnel encap over tagged Ethernet")
+Signed-off-by: Parav Pandit <parav@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -97,15 +97,19 @@ static int mlx5e_route_lookup_ipv4(struc
+ if (ret)
+ return ret;
+
+- if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET)
++ if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
++ ip_rt_put(rt);
+ return -ENETUNREACH;
++ }
+ #else
+ return -EOPNOTSUPP;
+ #endif
+
+ ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
+- if (ret < 0)
++ if (ret < 0) {
++ ip_rt_put(rt);
+ return ret;
++ }
+
+ if (!(*out_ttl))
+ *out_ttl = ip4_dst_hoplimit(&rt->dst);
+@@ -149,8 +153,10 @@ static int mlx5e_route_lookup_ipv6(struc
+ *out_ttl = ip6_dst_hoplimit(dst);
+
+ ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
+- if (ret < 0)
++ if (ret < 0) {
++ dst_release(dst);
+ return ret;
++ }
+ #else
+ return -EOPNOTSUPP;
+ #endif
diff --git a/patches.suse/net-mlx5-fix-memory-leak-in-mlx5_fw_fatal_reporter_d.patch b/patches.suse/net-mlx5-fix-memory-leak-in-mlx5_fw_fatal_reporter_d.patch
new file mode 100644
index 0000000000..1bb9d85f15
--- /dev/null
+++ b/patches.suse/net-mlx5-fix-memory-leak-in-mlx5_fw_fatal_reporter_d.patch
@@ -0,0 +1,31 @@
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+Date: Fri, 27 Sep 2019 17:37:28 -0500
+Subject: net/mlx5: fix memory leak in mlx5_fw_fatal_reporter_dump
+Patch-mainline: v5.4-rc6
+Git-commit: c7ed6d0183d5ea9bc31bcaeeba4070bd62546471
+References: jsc#SLE-8464
+
+In mlx5_fw_fatal_reporter_dump, if mlx5_crdump_collect fails, the
+memory allocated for cr_data must be released, otherwise there will
+be a memory leak. To fix this, this commit changes the return
+instruction into a goto to the error handling path.
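+
+A sketch of the goto-unwind pattern the fix adopts; free_data is the
+label the fix jumps to, which is expected to release cr_data:
+
+	err = mlx5_crdump_collect(dev, cr_data);
+	if (err)
+		goto free_data;	/* was: return err, leaking cr_data */
+	...
+free_data:
+	kvfree(cr_data);
+	return err;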
+
+Fixes: 9b1f29823605 ("net/mlx5: Add support for FW fatal reporter dump")
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/health.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -572,7 +572,7 @@ mlx5_fw_fatal_reporter_dump(struct devli
+ return -ENOMEM;
+ err = mlx5_crdump_collect(dev, cr_data);
+ if (err)
+- return err;
++ goto free_data;
+
+ if (priv_ctx) {
+ struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
diff --git a/patches.suse/net-mlx5-prevent-memory-leak-in-mlx5_fpga_conn_creat.patch b/patches.suse/net-mlx5-prevent-memory-leak-in-mlx5_fpga_conn_creat.patch
new file mode 100644
index 0000000000..d3d3f775c4
--- /dev/null
+++ b/patches.suse/net-mlx5-prevent-memory-leak-in-mlx5_fpga_conn_creat.patch
@@ -0,0 +1,32 @@
+From: Navid Emamdoost <navid.emamdoost@gmail.com>
+Date: Tue, 24 Sep 2019 22:20:34 -0500
+Subject: net/mlx5: prevent memory leak in mlx5_fpga_conn_create_cq
+Patch-mainline: v5.4-rc6
+Git-commit: c8c2a057fdc7de1cd16f4baa51425b932a42eb39
+References: jsc#SLE-8464
+
+In mlx5_fpga_conn_create_cq, if mlx5_vector2eqn fails, the allocated
+memory should be released.
+
+Fixes: 537a50574175 ("net/mlx5: FPGA, Add high-speed connection routines")
+Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+@@ -464,8 +464,10 @@ static int mlx5_fpga_conn_create_cq(stru
+ }
+
+ err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+- if (err)
++ if (err) {
++ kvfree(in);
+ goto err_cqwq;
++ }
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
diff --git a/patches.suse/net-mlx5e-Determine-source-port-properly-for-vlan-pu.patch b/patches.suse/net-mlx5e-Determine-source-port-properly-for-vlan-pu.patch
new file mode 100644
index 0000000000..cf9cbeaa8b
--- /dev/null
+++ b/patches.suse/net-mlx5e-Determine-source-port-properly-for-vlan-pu.patch
@@ -0,0 +1,64 @@
+From: Dmytro Linkin <dmitrolin@mellanox.com>
+Date: Wed, 4 Sep 2019 12:32:49 +0000
+Subject: net/mlx5e: Determine source port properly for vlan push action
+Patch-mainline: v5.4-rc6
+Git-commit: d5dbcc4e87bc8444bd2f1ca4b8f787e1e5677ec2
+References: jsc#SLE-8464
+
+Termination tables are used for vlan push actions on uplink ports.
+To support RoCE dual port, the source port value was placed in a register.
+Fix the code to use an API method returning the source port according to
+the FW capabilities.
+
+Fixes: 10caabdaad5a ("net/mlx5e: Use termination table for VLAN push actions")
+Signed-off-by: Dmytro Linkin <dmitrolin@mellanox.com>
+Reviewed-by: Jianbo Liu <jianbol@mellanox.com>
+Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 22 +++++++---
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+@@ -177,22 +177,32 @@ mlx5_eswitch_termtbl_actions_move(struct
+ memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+ }
+
++static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
++ const struct mlx5_flow_spec *spec)
++{
++ u32 port_mask, port_value;
++
++ if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
++ return spec->flow_context.flow_source == MLX5_VPORT_UPLINK;
++
++ port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
++ misc_parameters.source_port);
++ port_value = MLX5_GET(fte_match_param, spec->match_value,
++ misc_parameters.source_port);
++ return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
++}
++
+ bool
+ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec)
+ {
+- u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
+- misc_parameters.source_port);
+- u32 port_value = MLX5_GET(fte_match_param, spec->match_value,
+- misc_parameters.source_port);
+-
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
+ return false;
+
+ /* push vlan on RX */
+ return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
+- ((port_mask & port_value) == MLX5_VPORT_UPLINK);
++ mlx5_eswitch_offload_is_uplink_port(esw, spec);
+ }
+
+ struct mlx5_flow_handle *
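
A small sketch of the capability-gated check the patch introduces: prefer
the FW-provided flow_source field when the capability is present, else
fall back to the source_port match mask/value pair (names and the uplink
constant are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

#define VPORT_UPLINK 0xffffU    /* illustrative value */

struct flow_spec {
        uint32_t flow_source;   /* filled by FW when supported */
        uint32_t port_mask;     /* match criteria */
        uint32_t port_value;    /* match value */
};

static bool is_uplink_port(bool fw_has_flow_source, const struct flow_spec *s)
{
        if (fw_has_flow_source)
                return s->flow_source == VPORT_UPLINK;

        return (s->port_mask & s->port_value & 0xffff) == VPORT_UPLINK;
}
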
diff --git a/patches.suse/net-mlx5e-Don-t-store-direct-pointer-to-action-s-tun.patch b/patches.suse/net-mlx5e-Don-t-store-direct-pointer-to-action-s-tun.patch
new file mode 100644
index 0000000000..12ce8f65f2
--- /dev/null
+++ b/patches.suse/net-mlx5e-Don-t-store-direct-pointer-to-action-s-tun.patch
@@ -0,0 +1,267 @@
+From: Vlad Buslov <vladbu@mellanox.com>
+Date: Tue, 10 Sep 2019 14:38:17 +0300
+Subject: net/mlx5e: Don't store direct pointer to action's tunnel info
+Patch-mainline: v5.4-rc6
+Git-commit: 2a4b6526236791a1bb8092079ad87a1629e78db5
+References: jsc#SLE-8464
+
+The Geneve implementation changed mlx5 tc to use a direct pointer to the
+tunnel_key action's internal struct ip_tunnel_info instance. However, this
+leads to a use-after-free when the initial filter that caused creation of
+a new encap entry is deleted, or when the tunnel_key action is manually
+overwritten through the action API. Moreover, with the recent TC offloads
+API unlocking change, struct flow_action_entry->tunnel points to a
+temporary copy of the tunnel info that is deallocated after the filter is
+offloaded to hardware, so the bug reproduces every time a new filter is
+attached to an existing encap entry, with the following KASAN report:
+
+[ 314.885555] ==================================================================
+[ 314.886641] BUG: KASAN: use-after-free in memcmp+0x2c/0x60
+[ 314.886864] Read of size 1 at addr ffff88886c746280 by task tc/2682
+
+[ 314.887179] CPU: 22 PID: 2682 Comm: tc Not tainted 5.3.0-rc7+ #703
+[ 314.887188] Hardware name: Supermicro SYS-2028TP-DECR/X10DRT-P, BIOS 2.0b 03/30/2017
+[ 314.887195] Call Trace:
+[ 314.887215] dump_stack+0x9a/0xf0
+[ 314.887236] print_address_description+0x67/0x323
+[ 314.887248] ? memcmp+0x2c/0x60
+[ 314.887257] ? memcmp+0x2c/0x60
+[ 314.887272] __kasan_report.cold+0x1a/0x3d
+[ 314.887474] ? __mlx5e_tc_del_fdb_peer_flow+0x100/0x1b0 [mlx5_core]
+[ 314.887484] ? memcmp+0x2c/0x60
+[ 314.887509] kasan_report+0xe/0x12
+[ 314.887521] memcmp+0x2c/0x60
+[ 314.887662] mlx5e_tc_add_fdb_flow+0x51b/0xbe0 [mlx5_core]
+[ 314.887838] ? mlx5e_encap_take+0x110/0x110 [mlx5_core]
+[ 314.887902] ? lockdep_init_map+0x87/0x2c0
+[ 314.887924] ? __init_waitqueue_head+0x4f/0x60
+[ 314.888062] ? mlx5e_alloc_flow.isra.0+0x18c/0x1c0 [mlx5_core]
+[ 314.888207] __mlx5e_add_fdb_flow+0x2d7/0x440 [mlx5_core]
+[ 314.888359] ? mlx5e_tc_update_neigh_used_value+0x6f0/0x6f0 [mlx5_core]
+[ 314.888374] ? match_held_lock+0x2e/0x240
+[ 314.888537] mlx5e_configure_flower+0x830/0x16a0 [mlx5_core]
+[ 314.888702] ? __mlx5e_add_fdb_flow+0x440/0x440 [mlx5_core]
+[ 314.888713] ? down_read+0x118/0x2c0
+[ 314.888728] ? down_read_killable+0x300/0x300
+[ 314.888882] ? mlx5e_rep_get_ethtool_stats+0x180/0x180 [mlx5_core]
+[ 314.888899] tc_setup_cb_add+0x127/0x270
+[ 314.888937] fl_hw_replace_filter+0x2ac/0x380 [cls_flower]
+[ 314.888976] ? fl_hw_destroy_filter+0x1b0/0x1b0 [cls_flower]
+[ 314.888990] ? fl_change+0xbcf/0x27ef [cls_flower]
+[ 314.889030] ? fl_change+0xa57/0x27ef [cls_flower]
+[ 314.889069] fl_change+0x16bd/0x27ef [cls_flower]
+[ 314.889135] ? __rhashtable_insert_fast.constprop.0+0xa00/0xa00 [cls_flower]
+[ 314.889167] ? __radix_tree_lookup+0xa4/0x130
+[ 314.889200] ? fl_get+0x169/0x240 [cls_flower]
+[ 314.889218] ? fl_walk+0x230/0x230 [cls_flower]
+[ 314.889249] tc_new_tfilter+0x5e1/0xd40
+[ 314.889281] ? __rhashtable_insert_fast.constprop.0+0xa00/0xa00 [cls_flower]
+[ 314.889309] ? tc_del_tfilter+0xa30/0xa30
+[ 314.889335] ? __lock_acquire+0x5b5/0x2460
+[ 314.889378] ? find_held_lock+0x85/0xa0
+[ 314.889442] ? tc_del_tfilter+0xa30/0xa30
+[ 314.889465] rtnetlink_rcv_msg+0x4ab/0x5f0
+[ 314.889488] ? rtnl_dellink+0x490/0x490
+[ 314.889518] ? lockdep_hardirqs_on+0x260/0x260
+[ 314.889538] ? netlink_deliver_tap+0xab/0x5a0
+[ 314.889550] ? match_held_lock+0x1b/0x240
+[ 314.889575] netlink_rcv_skb+0xd0/0x200
+[ 314.889588] ? rtnl_dellink+0x490/0x490
+[ 314.889605] ? netlink_ack+0x440/0x440
+[ 314.889635] ? netlink_deliver_tap+0x161/0x5a0
+[ 314.889648] ? lock_downgrade+0x360/0x360
+[ 314.889657] ? lock_acquire+0xe5/0x210
+[ 314.889686] netlink_unicast+0x296/0x350
+[ 314.889707] ? netlink_attachskb+0x390/0x390
+[ 314.889726] ? _copy_from_iter_full+0xe0/0x3a0
+[ 314.889738] ? __virt_addr_valid+0xbb/0x130
+[ 314.889771] netlink_sendmsg+0x394/0x600
+[ 314.889800] ? netlink_unicast+0x350/0x350
+[ 314.889817] ? move_addr_to_kernel.part.0+0x90/0x90
+[ 314.889852] ? netlink_unicast+0x350/0x350
+[ 314.889872] sock_sendmsg+0x96/0xa0
+[ 314.889891] ___sys_sendmsg+0x482/0x520
+[ 314.889919] ? copy_msghdr_from_user+0x250/0x250
+[ 314.889930] ? __fput+0x1fa/0x390
+[ 314.889941] ? task_work_run+0xb7/0xf0
+[ 314.889957] ? exit_to_usermode_loop+0x117/0x120
+[ 314.889972] ? entry_SYSCALL_64_after_hwframe+0x49/0xbe
+[ 314.889982] ? do_syscall_64+0x74/0xe0
+[ 314.889992] ? entry_SYSCALL_64_after_hwframe+0x49/0xbe
+[ 314.890012] ? mark_lock+0xac/0x9a0
+[ 314.890028] ? __lock_acquire+0x5b5/0x2460
+[ 314.890053] ? mark_lock+0xac/0x9a0
+[ 314.890083] ? __lock_acquire+0x5b5/0x2460
+[ 314.890112] ? match_held_lock+0x1b/0x240
+[ 314.890144] ? __fget_light+0xa1/0xf0
+[ 314.890166] ? sockfd_lookup_light+0x91/0xb0
+[ 314.890187] __sys_sendmsg+0xba/0x130
+[ 314.890201] ? __sys_sendmsg_sock+0xb0/0xb0
+[ 314.890225] ? __blkcg_punt_bio_submit+0xd0/0xd0
+[ 314.890264] ? lockdep_hardirqs_off+0xbe/0x100
+[ 314.890274] ? mark_held_locks+0x24/0x90
+[ 314.890286] ? do_syscall_64+0x1e/0xe0
+[ 314.890308] do_syscall_64+0x74/0xe0
+[ 314.890325] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+[ 314.890336] RIP: 0033:0x7f00ca33d7b8
+[ 314.890348] Code: 89 02 48 c7 c0 ff ff ff ff eb bb 0f 1f 80 00 00 00 00 f3 0f 1e fa 48 8d 05 65 8f 0c 00 8b 00 85 c0 75 17 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 58 c3 0f 1f 80 00 00 00 00 48 83 ec 28 89 5
+4
+[ 314.890356] RSP: 002b:00007ffea2983928 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+[ 314.890369] RAX: ffffffffffffffda RBX: 000000005d777d5b RCX: 00007f00ca33d7b8
+[ 314.890377] RDX: 0000000000000000 RSI: 00007ffea2983990 RDI: 0000000000000003
+[ 314.890384] RBP: 0000000000000000 R08: 0000000000000001 R09: 0000000000000006
+[ 314.890392] R10: 0000000000404eda R11: 0000000000000246 R12: 0000000000000001
+[ 314.890400] R13: 000000000047f640 R14: 00007ffea2987b58 R15: 0000000000000021
+
+[ 314.890529] Allocated by task 2687:
+[ 314.890684] save_stack+0x1b/0x80
+[ 314.890694] __kasan_kmalloc.constprop.0+0xc2/0xd0
+[ 314.890705] __kmalloc_track_caller+0x102/0x340
+[ 314.890721] kmemdup+0x1d/0x40
+[ 314.890730] tc_setup_flow_action+0x731/0x2c27
+[ 314.890743] fl_hw_replace_filter+0x23b/0x380 [cls_flower]
+[ 314.890756] fl_change+0x16bd/0x27ef [cls_flower]
+[ 314.890765] tc_new_tfilter+0x5e1/0xd40
+[ 314.890776] rtnetlink_rcv_msg+0x4ab/0x5f0
+[ 314.890786] netlink_rcv_skb+0xd0/0x200
+[ 314.890796] netlink_unicast+0x296/0x350
+[ 314.890805] netlink_sendmsg+0x394/0x600
+[ 314.890815] sock_sendmsg+0x96/0xa0
+[ 314.890825] ___sys_sendmsg+0x482/0x520
+[ 314.890834] __sys_sendmsg+0xba/0x130
+[ 314.890844] do_syscall_64+0x74/0xe0
+[ 314.890854] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+[ 314.890937] Freed by task 2687:
+[ 314.891076] save_stack+0x1b/0x80
+[ 314.891086] __kasan_slab_free+0x12c/0x170
+[ 314.891095] kfree+0xeb/0x2f0
+[ 314.891106] tc_cleanup_flow_action+0x69/0xa0
+[ 314.891119] fl_hw_replace_filter+0x2c5/0x380 [cls_flower]
+[ 314.891132] fl_change+0x16bd/0x27ef [cls_flower]
+[ 314.891140] tc_new_tfilter+0x5e1/0xd40
+[ 314.891151] rtnetlink_rcv_msg+0x4ab/0x5f0
+[ 314.891161] netlink_rcv_skb+0xd0/0x200
+[ 314.891170] netlink_unicast+0x296/0x350
+[ 314.891180] netlink_sendmsg+0x394/0x600
+[ 314.891190] sock_sendmsg+0x96/0xa0
+[ 314.891200] ___sys_sendmsg+0x482/0x520
+[ 314.891208] __sys_sendmsg+0xba/0x130
+[ 314.891218] do_syscall_64+0x74/0xe0
+[ 314.891228] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+[ 314.891315] The buggy address belongs to the object at ffff88886c746280
+ which belongs to the cache kmalloc-96 of size 96
+[ 314.891762] The buggy address is located 0 bytes inside of
+ 96-byte region [ffff88886c746280, ffff88886c7462e0)
+[ 314.892196] The buggy address belongs to the page:
+[ 314.892387] page:ffffea0021b1d180 refcount:1 mapcount:0 mapping:ffff88835d00ef80 index:0x0
+[ 314.892398] flags: 0x57ffffc0000200(slab)
+[ 314.892413] raw: 0057ffffc0000200 ffffea00219e0340 0000000800000008 ffff88835d00ef80
+[ 314.892423] raw: 0000000000000000 0000000080200020 00000001ffffffff 0000000000000000
+[ 314.892430] page dumped because: kasan: bad access detected
+
+[ 314.892515] Memory state around the buggy address:
+[ 314.892707] ffff88886c746180: fb fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
+[ 314.892976] ffff88886c746200: fb fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
+[ 314.893251] >ffff88886c746280: fb fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
+[ 314.893522] ^
+[ 314.893657] ffff88886c746300: fb fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc
+[ 314.893924] ffff88886c746380: 00 00 00 00 00 00 00 00 00 fc fc fc fc fc fc fc
+[ 314.894189] ==================================================================
+
+Fix the issue by duplicating the tunnel info into a per-encap copy that is
+deallocated with the encap structure. Also duplicate the tunnel info in the
+flow parse attribute to support flows that might be attached asynchronously.
+
+Fixes: 1f6da30697d0 ("net/mlx5e: Geneve, Keep tunnel info as pointer to the original struct")
+Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
+Reviewed-by: Yevgeny Kliteynik <kliteyn@mellanox.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 34 +++++++++++++++++++-----
+ 1 file changed, 27 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1278,8 +1278,10 @@ static void mlx5e_tc_del_fdb_flow(struct
+ mlx5_eswitch_del_vlan_action(esw, attr);
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+- if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
++ if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
+ mlx5e_detach_encap(priv, flow, out_index);
++ kfree(attr->parse_attr->tun_info[out_index]);
++ }
+ kvfree(attr->parse_attr);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+@@ -1559,6 +1561,7 @@ static void mlx5e_encap_dealloc(struct m
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+ }
+
++ kfree(e->tun_info);
+ kfree(e->encap_header);
+ kfree_rcu(e, rcu);
+ }
+@@ -2972,6 +2975,13 @@ mlx5e_encap_get(struct mlx5e_priv *priv,
+ return NULL;
+ }
+
++static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
++{
++ size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
++
++ return kmemdup(tun_info, tun_size, GFP_KERNEL);
++}
++
+ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct net_device *mirred_dev,
+@@ -3028,13 +3038,15 @@ static int mlx5e_attach_encap(struct mlx
+ refcount_set(&e->refcnt, 1);
+ init_completion(&e->res_ready);
+
++ tun_info = dup_tun_info(tun_info);
++ if (!tun_info) {
++ err = -ENOMEM;
++ goto out_err_init;
++ }
+ e->tun_info = tun_info;
+ err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
+- if (err) {
+- kfree(e);
+- e = NULL;
+- goto out_err;
+- }
++ if (err)
++ goto out_err_init;
+
+ INIT_LIST_HEAD(&e->flows);
+ hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+@@ -3075,6 +3087,12 @@ out_err:
+ if (e)
+ mlx5e_encap_put(priv, e);
+ return err;
++
++out_err_init:
++ mutex_unlock(&esw->offloads.encap_tbl_lock);
++ kfree(tun_info);
++ kfree(e);
++ return err;
+ }
+
+ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
+@@ -3295,7 +3313,9 @@ static int parse_tc_fdb_actions(struct m
+ } else if (encap) {
+ parse_attr->mirred_ifindex[attr->out_count] =
+ out_dev->ifindex;
+- parse_attr->tun_info[attr->out_count] = info;
++ parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
++ if (!parse_attr->tun_info[attr->out_count])
++ return -ENOMEM;
+ encap = false;
+ attr->dests[attr->out_count].flags |=
+ MLX5_ESW_DEST_ENCAP;
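
The core of the fix is a deep copy covering the struct plus its trailing
variable-length options, so the encap entry no longer aliases memory
owned by the short-lived TC action. A rough userspace sketch (struct
layout is illustrative):

#include <stdlib.h>
#include <string.h>

struct tun_info {
        size_t options_len;             /* length of the variable tail */
        /* ... fixed-size fields ... */
        unsigned char options[];        /* variable-length tail */
};

static struct tun_info *dup_tun_info(const struct tun_info *src)
{
        size_t sz = sizeof(*src) + src->options_len;
        struct tun_info *copy = malloc(sz);     /* kmemdup() equivalent */

        if (copy)
                memcpy(copy, src, sz);
        return copy;
}
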
diff --git a/patches.suse/net-mlx5e-Fix-ethtool-self-test-link-speed.patch b/patches.suse/net-mlx5e-Fix-ethtool-self-test-link-speed.patch
new file mode 100644
index 0000000000..1497162d9e
--- /dev/null
+++ b/patches.suse/net-mlx5e-Fix-ethtool-self-test-link-speed.patch
@@ -0,0 +1,58 @@
+From: Aya Levin <ayal@mellanox.com>
+Date: Wed, 2 Oct 2019 16:53:21 +0300
+Subject: net/mlx5e: Fix ethtool self test: link speed
+Patch-mainline: v5.4-rc6
+Git-commit: 534e7366f41b0c689b01af4375aefcd1462adedf
+References: jsc#SLE-8464
+
+The ethtool self test contains a test for link speed. This test reads the
+PTYS register and determines whether the current speed is valid or not.
+Change the current implementation to use the function mlx5e_port_linkspeed(),
+which does the same check and fails when the speed is invalid. This code
+redundancy led to a bug when mlx5e_port_linkspeed() was updated with
+extended speeds and the self test was not.
+
+Fixes: 2c81bfd5ae56 ("net/mlx5e: Move port speed code from en_ethtool.c to en/port.c")
+Signed-off-by: Aya Levin <ayal@mellanox.com>
+Reviewed-by: Moshe Shemesh <moshe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 15 +++------------
+ 1 file changed, 3 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+@@ -35,6 +35,7 @@
+ #include <linux/udp.h>
+ #include <net/udp.h>
+ #include "en.h"
++#include "en/port.h"
+
+ enum {
+ MLX5E_ST_LINK_STATE,
+@@ -80,22 +81,12 @@ static int mlx5e_test_link_state(struct
+
+ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
+ {
+- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+- u32 eth_proto_oper;
+- int i;
++ u32 speed;
+
+ if (!netif_carrier_ok(priv->netdev))
+ return 1;
+
+- if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1))
+- return 1;
+-
+- eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) {
+- if (eth_proto_oper & MLX5E_PROT_MASK(i))
+- return 0;
+- }
+- return 1;
++ return mlx5e_port_linkspeed(priv->mdev, &speed);
+ }
+
+ struct mlx5ehdr {
diff --git a/patches.suse/net-mlx5e-Fix-handling-of-compressed-CQEs-in-case-of.patch b/patches.suse/net-mlx5e-Fix-handling-of-compressed-CQEs-in-case-of.patch
new file mode 100644
index 0000000000..fb03a15fb8
--- /dev/null
+++ b/patches.suse/net-mlx5e-Fix-handling-of-compressed-CQEs-in-case-of.patch
@@ -0,0 +1,55 @@
+From: Maxim Mikityanskiy <maximmi@mellanox.com>
+Date: Mon, 16 Sep 2019 14:54:20 +0300
+Subject: net/mlx5e: Fix handling of compressed CQEs in case of low NAPI budget
+Patch-mainline: v5.4-rc6
+Git-commit: 9df86bdb6746d7fcfc2fda715f7a7c3d0ddb2654
+References: jsc#SLE-8464
+
+When CQE compression is enabled, compressed CQEs use the following
+structure: a title is followed by one or many blocks, each containing 8
+mini CQEs (except the last, which may contain fewer mini CQEs).
+
+Due to NAPI budget restriction, a complete structure is not always
+parsed in one NAPI run, and some blocks with mini CQEs may be deferred
+to the next NAPI poll call - we have the mlx5e_decompress_cqes_cont call
+in the beginning of mlx5e_poll_rx_cq. However, if the budget is
+extremely low, some blocks may be left even after that, but the code
+that follows the mlx5e_decompress_cqes_cont call doesn't check it and
+assumes that a new CQE begins, which may not be the case. In such cases,
+random memory corruptions occur.
+
+An extremely low NAPI budget of 8 is used when busy_poll or busy_read is
+active.
+
+This commit adds a check to make sure that the previous compressed CQE
+has been completely parsed after mlx5e_decompress_cqes_cont; otherwise,
+it prevents a new CQE from being fetched in the middle of a compressed
+CQE.
+
+This commit fixes random crashes in __build_skb, __page_pool_put_page
+and other not-related-directly places, that used to happen when both CQE
+compression and busy_poll/busy_read were enabled.
+
+Fixes: 7219ab34f184 ("net/mlx5e: CQE compression")
+Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1386,8 +1386,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
+ return 0;
+
+- if (rq->cqd.left)
++ if (rq->cqd.left) {
+ work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
++ if (rq->cqd.left || work_done >= budget)
++ goto out;
++ }
+
+ cqe = mlx5_cqwq_get_cqe(cqwq);
+ if (!cqe) {
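
The control flow the fix establishes, as a simplified sketch (types and
helpers are stand-ins for the mlx5e ones): pending mini-CQEs are drained
first, and if any remain, or the budget ran out, the poll bails instead
of misparsing the next block as a fresh CQE title:

struct rq_state {
        int cqd_left;   /* mini-CQEs still pending from a previous poll */
};

static int decompress_cont(struct rq_state *rq, int budget)
{
        int n = rq->cqd_left < budget ? rq->cqd_left : budget;

        rq->cqd_left -= n;      /* drain up to `budget` pending mini-CQEs */
        return n;
}

static int poll_rx(struct rq_state *rq, int budget)
{
        int work_done = 0;

        if (rq->cqd_left) {
                work_done += decompress_cont(rq, budget);
                if (rq->cqd_left || work_done >= budget)
                        return work_done;       /* resume on the next NAPI poll */
        }

        /* ... only here is it safe to fetch and parse a fresh CQE ... */
        return work_done;
}
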
diff --git a/patches.suse/net-mlx5e-Initialize-on-stack-link-modes-bitmap.patch b/patches.suse/net-mlx5e-Initialize-on-stack-link-modes-bitmap.patch
new file mode 100644
index 0000000000..f9c7ac8726
--- /dev/null
+++ b/patches.suse/net-mlx5e-Initialize-on-stack-link-modes-bitmap.patch
@@ -0,0 +1,29 @@
+From: Aya Levin <ayal@mellanox.com>
+Date: Wed, 23 Oct 2019 12:57:54 +0300
+Subject: net/mlx5e: Initialize on stack link modes bitmap
+Patch-mainline: v5.4-rc6
+Git-commit: 926b37f76fb0a22fe93c8873c819fd167180e85c
+References: jsc#SLE-8464
+
+Initialize the on-stack link modes bitmap before using it; otherwise the
+outcome of ethtool set link ksettings might contain unexpected values.
+
+Fixes: 4b95840a6ced ("net/mlx5e: Fix matching of speed to PRM link modes")
+Signed-off-by: Aya Levin <ayal@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1021,7 +1021,7 @@ static bool ext_link_mode_requested(cons
+ {
+ #define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
+ int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
+- __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = {0,};
+
+ bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
+ return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
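
The underlying rule, sketched in userspace terms: an on-stack bitmap
holds stack garbage until initialized, so any intersection test against
it gives random answers. Sizes and names below are illustrative:

#include <stddef.h>
#include <stdint.h>

#define MODE_BITS       92      /* think __ETHTOOL_LINK_MODE_MASK_NBITS */
#define BITS_TO_U64(n)  (((n) + 63) / 64)

static int modes_intersect(const uint64_t *a, const uint64_t *b, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (a[i] & b[i])
                        return 1;
        return 0;
}

static int example(const uint64_t *advertised)
{
        /* without "= { 0 }" the test below returns random results */
        uint64_t modes[BITS_TO_U64(MODE_BITS)] = { 0 };

        modes[1] |= 1ULL;       /* set only the bits actually intended */
        return modes_intersect(modes, advertised, BITS_TO_U64(MODE_BITS));
}
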
diff --git a/patches.suse/net-mlx5e-Only-skip-encap-flows-update-when-encap-in.patch b/patches.suse/net-mlx5e-Only-skip-encap-flows-update-when-encap-in.patch
new file mode 100644
index 0000000000..3bff0bb84d
--- /dev/null
+++ b/patches.suse/net-mlx5e-Only-skip-encap-flows-update-when-encap-in.patch
@@ -0,0 +1,34 @@
+From: Vlad Buslov <vladbu@mellanox.com>
+Date: Tue, 24 Sep 2019 10:19:16 +0300
+Subject: net/mlx5e: Only skip encap flows update when encap init failed
+Patch-mainline: v5.4-rc6
+Git-commit: 64d7b68577130ae00f954a28ea9d6bc51025caf9
+References: jsc#SLE-8464
+
+When encap entry initialization completes successfully, e->compl_result is
+set to a positive value, not zero as mlx5e_rep_update_flows() currently
+assumes. Fix the conditional to skip the encap flows update only when
+e->compl_result < 0.
+
+Fixes: 2a1f1768fa17 ("net/mlx5e: Refactor neigh update for concurrent execution")
+Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -611,8 +611,8 @@ static void mlx5e_rep_update_flows(struc
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
+- if (e->compl_result || (encap_connected == neigh_connected &&
+- ether_addr_equal(e->h_dest, ha)))
++ if (e->compl_result < 0 || (encap_connected == neigh_connected &&
++ ether_addr_equal(e->h_dest, ha)))
+ goto unlock;
+
+ mlx5e_take_all_encap_flows(e, &flow_list);
diff --git a/patches.suse/net-mlx5e-Remove-incorrect-match-criteria-assignment.patch b/patches.suse/net-mlx5e-Remove-incorrect-match-criteria-assignment.patch
new file mode 100644
index 0000000000..ec57d27f30
--- /dev/null
+++ b/patches.suse/net-mlx5e-Remove-incorrect-match-criteria-assignment.patch
@@ -0,0 +1,31 @@
+From: Dmytro Linkin <dmitrolin@mellanox.com>
+Date: Thu, 29 Aug 2019 15:24:27 +0000
+Subject: net/mlx5e: Remove incorrect match criteria assignment line
+Patch-mainline: v5.4-rc6
+Git-commit: 752d3dc06d6936d5a357a18b6b51d91c7e134e88
+References: jsc#SLE-8464
+
+The driver has a function that enables the match criteria for misc
+parameters depending on eswitch capabilities, making the unconditional
+assignment removed here incorrect.
+
+Fixes: 4f5d1beadc10 ("Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux")
+Signed-off-by: Dmytro Linkin <dmitrolin@mellanox.com>
+Reviewed-by: Jianbo Liu <jianbol@mellanox.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -285,7 +285,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_es
+
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+
+- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+ if (attr->outer_match_level != MLX5_MATCH_NONE)
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
diff --git a/patches.suse/net-mlx5e-Replace-kfree-with-kvfree-when-free-vhca-s.patch b/patches.suse/net-mlx5e-Replace-kfree-with-kvfree-when-free-vhca-s.patch
new file mode 100644
index 0000000000..26a9823115
--- /dev/null
+++ b/patches.suse/net-mlx5e-Replace-kfree-with-kvfree-when-free-vhca-s.patch
@@ -0,0 +1,35 @@
+From: Maor Gottlieb <maorg@mellanox.com>
+Date: Mon, 16 Sep 2019 13:17:33 +0300
+Subject: net/mlx5e: Replace kfree with kvfree when free vhca stats
+Patch-mainline: v5.4-rc6
+Git-commit: 5dfb6335cbecbd59040275c8396c2d0af0bbd549
+References: jsc#SLE-8464
+
+Memory allocated by kvzalloc should be freed by kvfree.
+
+Fixes: cef35af34d6d ("net/mlx5e: Add mlx5e HV VHCA stats agent")
+Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+@@ -141,7 +141,7 @@ int mlx5e_hv_vhca_stats_create(struct ml
+ "Failed to create hv vhca stats agent, err = %ld\n",
+ PTR_ERR(agent));
+
+- kfree(priv->stats_agent.buf);
++ kvfree(priv->stats_agent.buf);
+ return IS_ERR_OR_NULL(agent);
+ }
+
+@@ -157,5 +157,5 @@ void mlx5e_hv_vhca_stats_destroy(struct
+ return;
+
+ mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent);
+- kfree(priv->stats_agent.buf);
++ kvfree(priv->stats_agent.buf);
+ }
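
The pairing rule behind the fix, as a short sketch: kvzalloc() may fall
back to vmalloc() for large buffers, and only kvfree() can free both
kinds of memory (wrapper names are illustrative):

#include <linux/mm.h>   /* kvzalloc(), kvfree() */

static void *stats_buf_alloc(size_t sz)
{
        return kvzalloc(sz, GFP_KERNEL);  /* slab OR vmalloc memory */
}

static void stats_buf_free(void *buf)
{
        kvfree(buf);    /* kfree() here would mishandle vmalloc memory */
}
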
diff --git a/patches.suse/net-mlx5e-TX-Fix-consumer-index-of-error-cqe-dump.patch b/patches.suse/net-mlx5e-TX-Fix-consumer-index-of-error-cqe-dump.patch
new file mode 100644
index 0000000000..4ba1cd9efc
--- /dev/null
+++ b/patches.suse/net-mlx5e-TX-Fix-consumer-index-of-error-cqe-dump.patch
@@ -0,0 +1,34 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Tue, 24 Sep 2019 11:29:09 +0300
+Subject: net/mlx5e: TX, Fix consumer index of error cqe dump
+Patch-mainline: v5.4-rc6
+Git-commit: 61ea02d2c13106116c6e4916ac5d9dd41151c959
+References: jsc#SLE-8464
+
+The completion queue consumer index is incremented by a call to
+mlx5_cqwq_pop().
+When dumping an error CQE, the index has thus already been advanced.
+Subtract one to print the index of the failing CQE.
+
+Fixes: 16cc14d81733 ("net/mlx5e: Dump xmit error completions")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -403,7 +403,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *s
+ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
+ struct mlx5_err_cqe *err_cqe)
+ {
+- u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);
++ struct mlx5_cqwq *wq = &sq->cq.wq;
++ u32 ci;
++
++ ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
+
+ netdev_err(sq->channel->netdev,
+ "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
diff --git a/patches.suse/net-mlx5e-Tx-Fix-assumption-of-single-WQEBB-of-NOP-i.patch b/patches.suse/net-mlx5e-Tx-Fix-assumption-of-single-WQEBB-of-NOP-i.patch
new file mode 100644
index 0000000000..99cf79cf90
--- /dev/null
+++ b/patches.suse/net-mlx5e-Tx-Fix-assumption-of-single-WQEBB-of-NOP-i.patch
@@ -0,0 +1,50 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Mon, 16 Sep 2019 17:43:33 +0300
+Subject: net/mlx5e: Tx, Fix assumption of single WQEBB of NOP in cleanup flow
+Patch-mainline: v5.4-rc6
+Git-commit: 0c258dec8d98af15b34dbffdb89c008b6da01ff8
+References: jsc#SLE-8464
+
+The cited patch removed the assumption only in the datapath.
+Here we remove it also from the control/cleanup flow.
+
+Fixes: 9ab0233728ca ("net/mlx5e: Tx, Don't implicitly assume SKB-less wqe has one WQEBB")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6 +++++-
+ drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 4 ++--
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1349,9 +1349,13 @@ static void mlx5e_deactivate_txqsq(struc
+ /* last doorbell out, godspeed .. */
+ if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
++ struct mlx5e_tx_wqe_info *wi;
+ struct mlx5e_tx_wqe *nop;
+
+- sq->db.wqe_info[pi].skb = NULL;
++ wi = &sq->db.wqe_info[pi];
++
++ memset(wi, 0, sizeof(*wi));
++ wi->num_wqebbs = 1;
+ nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
+ }
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -550,8 +550,8 @@ void mlx5e_free_txqsq_descs(struct mlx5e
+ wi = &sq->db.wqe_info[ci];
+ skb = wi->skb;
+
+- if (!skb) { /* nop */
+- sq->cc++;
++ if (!skb) {
++ sq->cc += wi->num_wqebbs;
+ continue;
+ }
+
diff --git a/patches.suse/net-mlx5e-Tx-Zero-memset-WQE-info-struct-upon-update.patch b/patches.suse/net-mlx5e-Tx-Zero-memset-WQE-info-struct-upon-update.patch
new file mode 100644
index 0000000000..e3a579e0dc
--- /dev/null
+++ b/patches.suse/net-mlx5e-Tx-Zero-memset-WQE-info-struct-upon-update.patch
@@ -0,0 +1,34 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Mon, 16 Sep 2019 17:19:12 +0300
+Subject: net/mlx5e: Tx, Zero-memset WQE info struct upon update
+Patch-mainline: v5.4-rc6
+Git-commit: 500f36a485862cee15752b58a5a9a50c1f59ff58
+References: jsc#SLE-8464
+
+Not all fields of the WQE info are written by the function, leaving
+some with stale values from previous rounds.
+Zero-memset the struct upon update.
+
+In particular, not nullifying the wi->resync_dump_frag field
+will cause a double free of the kTLS DUMPed frags.
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+@@ -92,7 +92,7 @@ mlx5e_fill_sq_frag_edge(struct mlx5e_txq
+
+ /* fill sq frag edge with nops to avoid wqe wrapping two pages */
+ for (; wi < edge_wi; wi++) {
+- wi->skb = NULL;
++ memset(wi, 0, sizeof(*wi));
+ wi->num_wqebbs = 1;
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
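
The recycling hazard, as a sketch: ring entries are reused, so any field
not written on this pass keeps whatever the previous round left behind
(struct layout is illustrative):

#include <string.h>

struct tx_wqe_info {
        void *skb;
        int num_wqebbs;
        void *resync_dump_frag;  /* a stale value here means a double free */
};

static void fill_nop_wqe_info(struct tx_wqe_info *wi)
{
        memset(wi, 0, sizeof(*wi));  /* clear everything first */
        wi->num_wqebbs = 1;          /* then set only what this entry owns */
}
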
diff --git a/patches.suse/net-mlx5e-kTLS-Enhance-TX-resync-flow.patch b/patches.suse/net-mlx5e-kTLS-Enhance-TX-resync-flow.patch
new file mode 100644
index 0000000000..ba3c14aa30
--- /dev/null
+++ b/patches.suse/net-mlx5e-kTLS-Enhance-TX-resync-flow.patch
@@ -0,0 +1,247 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Thu, 3 Oct 2019 10:48:10 +0300
+Subject: net/mlx5e: kTLS, Enhance TX resync flow
+Patch-mainline: v5.4-rc6
+Git-commit: 46a3ea98074e2a7731ab9b84ec60fc18a2f909e5
+References: jsc#SLE-8464
+
+The kTLS TX resync function used to return a binary value, indicating
+success or failure.
+
+However, when the TLS SKB is a retransmission of the connection
+handshake, it initiates the resync flow (as the tcp seq check holds),
+while regular packet handling is expected instead.
+
+In this patch, we identify this case and skip the resync operation
+accordingly.
+
+Counters:
+- Add a counter (tls_skip_no_sync_data) to monitor this.
+- Bump the dump counters up as they are used more frequently.
+- Add a missing counter descriptor declaration for tls_resync_bytes
+ in sq_stats_desc.
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 58 +++++++------
+ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 16 ++-
+ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 10 +-
+ 3 files changed, 51 insertions(+), 33 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -185,26 +185,33 @@ struct tx_sync_info {
+ skb_frag_t frags[MAX_SKB_FRAGS];
+ };
+
+-static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+- u32 tcp_seq, struct tx_sync_info *info)
++enum mlx5e_ktls_sync_retval {
++ MLX5E_KTLS_SYNC_DONE,
++ MLX5E_KTLS_SYNC_FAIL,
++ MLX5E_KTLS_SYNC_SKIP_NO_DATA,
++};
++
++static enum mlx5e_ktls_sync_retval
++tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
++ u32 tcp_seq, struct tx_sync_info *info)
+ {
+ struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
++ enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
+ struct tls_record_info *record;
+ int remaining, i = 0;
+ unsigned long flags;
+- bool ret = true;
+
+ spin_lock_irqsave(&tx_ctx->lock, flags);
+ record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
+
+ if (unlikely(!record)) {
+- ret = false;
++ ret = MLX5E_KTLS_SYNC_FAIL;
+ goto out;
+ }
+
+ if (unlikely(tcp_seq < tls_record_start_seq(record))) {
+- if (!tls_record_is_start_marker(record))
+- ret = false;
++ ret = tls_record_is_start_marker(record) ?
++ MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+ goto out;
+ }
+
+@@ -316,20 +323,26 @@ static void tx_post_fence_nop(struct mlx
+ mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
+ }
+
+-static struct sk_buff *
++static enum mlx5e_ktls_sync_retval
+ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ struct mlx5e_txqsq *sq,
+- struct sk_buff *skb,
++ int datalen,
+ u32 seq)
+ {
+ struct mlx5e_sq_stats *stats = sq->stats;
+ struct mlx5_wq_cyc *wq = &sq->wq;
++ enum mlx5e_ktls_sync_retval ret;
+ struct tx_sync_info info = {};
+ u16 contig_wqebbs_room, pi;
+ u8 num_wqebbs;
+ int i = 0;
+
+- if (!tx_sync_info_get(priv_tx, seq, &info)) {
++ ret = tx_sync_info_get(priv_tx, seq, &info);
++ if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
++ if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
++ stats->tls_skip_no_sync_data++;
++ return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
++ }
+ /* We might get here if a retransmission reaches the driver
+ * after the relevant record is acked.
+ * It should be safe to drop the packet in this case
+@@ -339,13 +352,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+ }
+
+ if (unlikely(info.sync_len < 0)) {
+- u32 payload;
+- int headln;
+-
+- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
+- payload = skb->len - headln;
+- if (likely(payload <= -info.sync_len))
+- return skb;
++ if (likely(datalen <= -info.sync_len))
++ return MLX5E_KTLS_SYNC_DONE;
+
+ stats->tls_drop_bypass_req++;
+ goto err_out;
+@@ -360,7 +368,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+ */
+ if (!info.nr_frags) {
+ tx_post_fence_nop(sq);
+- return skb;
++ return MLX5E_KTLS_SYNC_DONE;
+ }
+
+ num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
+@@ -397,7 +405,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+ page_ref_add(skb_frag_page(f), n - 1);
+ }
+
+- return skb;
++ return MLX5E_KTLS_SYNC_DONE;
+
+ err_out:
+ for (; i < info.nr_frags; i++)
+@@ -408,8 +416,7 @@ err_out:
+ */
+ put_page(skb_frag_page(&info.frags[i]));
+
+- dev_kfree_skb_any(skb);
+- return NULL;
++ return MLX5E_KTLS_SYNC_FAIL;
+ }
+
+ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
+@@ -445,10 +452,15 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb
+
+ seq = ntohl(tcp_hdr(skb)->seq);
+ if (unlikely(priv_tx->expected_seq != seq)) {
+- skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
+- if (unlikely(!skb))
++ enum mlx5e_ktls_sync_retval ret =
++ mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
++
++ if (likely(ret == MLX5E_KTLS_SYNC_DONE))
++ *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
++ else if (ret == MLX5E_KTLS_SYNC_FAIL)
++ goto err_out;
++ else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
+ goto out;
+- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+ }
+
+ priv_tx->expected_seq = seq + datalen;
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+@@ -52,11 +52,12 @@ static const struct counter_desc sw_stat
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
+- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
+- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
+ #endif
+
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+@@ -288,11 +289,12 @@ static void mlx5e_grp_sw_update_stats(st
+ s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
+ s->tx_tls_ctx += sq_stats->tls_ctx;
+ s->tx_tls_ooo += sq_stats->tls_ooo;
++ s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
++ s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
+ s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
++ s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
+ s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
+ s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
+- s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
+- s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
+ #endif
+ s->tx_cqes += sq_stats->cqes;
+ }
+@@ -1472,10 +1474,12 @@ static const struct counter_desc sq_stat
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
+- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
++ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
++ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
++ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
++ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
+ #endif
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+@@ -129,11 +129,12 @@ struct mlx5e_sw_stats {
+ u64 tx_tls_encrypted_bytes;
+ u64 tx_tls_ctx;
+ u64 tx_tls_ooo;
++ u64 tx_tls_dump_packets;
++ u64 tx_tls_dump_bytes;
+ u64 tx_tls_resync_bytes;
++ u64 tx_tls_skip_no_sync_data;
+ u64 tx_tls_drop_no_sync_data;
+ u64 tx_tls_drop_bypass_req;
+- u64 tx_tls_dump_packets;
+- u64 tx_tls_dump_bytes;
+ #endif
+
+ u64 rx_xsk_packets;
+@@ -273,11 +274,12 @@ struct mlx5e_sq_stats {
+ u64 tls_encrypted_bytes;
+ u64 tls_ctx;
+ u64 tls_ooo;
++ u64 tls_dump_packets;
++ u64 tls_dump_bytes;
+ u64 tls_resync_bytes;
++ u64 tls_skip_no_sync_data;
+ u64 tls_drop_no_sync_data;
+ u64 tls_drop_bypass_req;
+- u64 tls_dump_packets;
+- u64 tls_dump_bytes;
+ #endif
+ /* less likely accessed in data path */
+ u64 csum_none;
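
The shape of the control-flow change, sketched: a bool cannot distinguish
"drop the skb" from "transmit it as a regular packet", so the return
value is widened to a three-way enum (enum values mirror the patch,
dispatch codes are arbitrary):

enum ktls_sync_retval {
        SYNC_DONE,              /* resync handled, transmit normally */
        SYNC_FAIL,              /* unrecoverable, drop the skb */
        SYNC_SKIP_NO_DATA,      /* handshake retransmit: skip resync */
};

static int dispatch(enum ktls_sync_retval ret)
{
        switch (ret) {
        case SYNC_DONE:
                return 0;       /* proceed with the regular xmit path */
        case SYNC_SKIP_NO_DATA:
                return 1;       /* hand the skb back untouched */
        case SYNC_FAIL:
        default:
                return -1;      /* free the skb and count the drop */
        }
}
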
diff --git a/patches.suse/net-mlx5e-kTLS-Fix-missing-SQ-edge-fill.patch b/patches.suse/net-mlx5e-kTLS-Fix-missing-SQ-edge-fill.patch
new file mode 100644
index 0000000000..c4d011545c
--- /dev/null
+++ b/patches.suse/net-mlx5e-kTLS-Fix-missing-SQ-edge-fill.patch
@@ -0,0 +1,75 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Mon, 7 Oct 2019 13:59:11 +0300
+Subject: net/mlx5e: kTLS, Fix missing SQ edge fill
+Patch-mainline: v5.4-rc6
+Git-commit: 700ec497424069fa4d8f3715759c4aaec016e840
+References: jsc#SLE-8464
+
+Before posting the context params WQEs, make sure there is enough
+contiguous room for them, and fill the frag edge if needed.
+
+When posting only a NOP, no room check is needed, as it occupies a
+single WQEBB and hence cannot have a contiguity issue.
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 28 +++++++++----
+ 1 file changed, 20 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -168,6 +168,14 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx
+ bool skip_static_post, bool fence_first_post)
+ {
+ bool progress_fence = skip_static_post || !fence_first_post;
++ struct mlx5_wq_cyc *wq = &sq->wq;
++ u16 contig_wqebbs_room, pi;
++
++ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
++ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
++ if (unlikely(contig_wqebbs_room <
++ MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
++ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
+
+ if (!skip_static_post)
+ post_static_params(sq, priv_tx, fence_first_post);
+@@ -355,10 +363,20 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+
+ stats->tls_ooo++;
+
+- num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
+- (info.nr_frags ? info.nr_frags * MLX5E_KTLS_DUMP_WQEBBS : 1);
++ tx_post_resync_params(sq, priv_tx, info.rcd_sn);
++
++ /* If no dump WQE was sent, we need to have a fence NOP WQE before the
++ * actual data xmit.
++ */
++ if (!info.nr_frags) {
++ tx_post_fence_nop(sq);
++ return skb;
++ }
++
++ num_wqebbs = info.nr_frags * MLX5E_KTLS_DUMP_WQEBBS;
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
++
+ if (unlikely(contig_wqebbs_room < num_wqebbs))
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
+
+@@ -368,12 +386,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+ if (tx_post_resync_dump(sq, &info.frags[i], priv_tx->tisn, !i))
+ goto err_out;
+
+- /* If no dump WQE was sent, we need to have a fence NOP WQE before the
+- * actual data xmit.
+- */
+- if (!info.nr_frags)
+- tx_post_fence_nop(sq);
+-
+ return skb;
+
+ err_out:
diff --git a/patches.suse/net-mlx5e-kTLS-Fix-page-refcnt-leak-in-TX-resync-err.patch b/patches.suse/net-mlx5e-kTLS-Fix-page-refcnt-leak-in-TX-resync-err.patch
new file mode 100644
index 0000000000..2f974b1233
--- /dev/null
+++ b/patches.suse/net-mlx5e-kTLS-Fix-page-refcnt-leak-in-TX-resync-err.patch
@@ -0,0 +1,53 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Wed, 18 Sep 2019 13:57:40 +0300
+Subject: net/mlx5e: kTLS, Fix page refcnt leak in TX resync error flow
+Patch-mainline: v5.4-rc6
+Git-commit: b61b24bd135a7775a2839863bd1d58a462a5f1e5
+References: jsc#SLE-8464
+
+All references to frag pages obtained in tx_sync_info_get()
+must be released.
+The release usually occurs upon completion of the corresponding WQE's CQE.
+In error flows, not all fragments have a WQE posted for them, hence
+no matching CQE will be generated.
+For these pages, release the reference in the error flow itself.
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -329,7 +329,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+ struct tx_sync_info info = {};
+ u16 contig_wqebbs_room, pi;
+ u8 num_wqebbs;
+- int i;
++ int i = 0;
+
+ if (!tx_sync_info_get(priv_tx, seq, &info)) {
+ /* We might get here if a retransmission reaches the driver
+@@ -364,7 +364,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+
+ tx_post_resync_params(sq, priv_tx, info.rcd_sn);
+
+- for (i = 0; i < info.nr_frags; i++)
++ for (; i < info.nr_frags; i++)
+ if (tx_post_resync_dump(sq, &info.frags[i], priv_tx->tisn, !i))
+ goto err_out;
+
+@@ -377,6 +377,9 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+ return skb;
+
+ err_out:
++ for (; i < info.nr_frags; i++)
++ put_page(skb_frag_page(&info.frags[i]));
++
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
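
The accounting the fix restores, as a userspace sketch: references whose
WQE was posted are dropped by their completions, so on failure the
cleanup loop resumes at the failing index and drops only the remainder
(helpers are hypothetical stand-ins):

static int post_one(int i) { return i >= 2 ? -1 : 0; }  /* stand-in: fails at frag 2 */
static void drop_ref(int i) { (void)i; }                /* stand-in for put_page() */

static int post_resync_dumps(int nr_frags)
{
        int i = 0;

        for (; i < nr_frags; i++)
                if (post_one(i))        /* like tx_post_resync_dump() */
                        goto err_out;
        return 0;

err_out:
        /* frags 0..i-1 got a WQE; frags i..nr_frags-1 did not */
        for (; i < nr_frags; i++)
                drop_ref(i);
        return -1;
}
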
diff --git a/patches.suse/net-mlx5e-kTLS-Limit-DUMP-wqe-size.patch b/patches.suse/net-mlx5e-kTLS-Limit-DUMP-wqe-size.patch
new file mode 100644
index 0000000000..61975e08c3
--- /dev/null
+++ b/patches.suse/net-mlx5e-kTLS-Limit-DUMP-wqe-size.patch
@@ -0,0 +1,163 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Mon, 7 Oct 2019 14:01:29 +0300
+Subject: net/mlx5e: kTLS, Limit DUMP wqe size
+Patch-mainline: v5.4-rc6
+Git-commit: 84d1bb2b139e0184b1754aa1b5776186b475fce8
+References: jsc#SLE-8464
+
+HW expects the data size in DUMP WQEs to be up to MTU.
+Make sure they are in range.
+
+We elevate the frag page refcount by 'n-1', in addition to the
+one obtained in tx_sync_info_get(), for an overall total of 'n'
+references. The increments are done in bulk, with a single
+page_ref_add() call, to optimize performance.
+The refcounts are released one by one, by the corresponding completions.
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
+ drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 11 +---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 11 +++-
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 34 +++++++++++--
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7 ++
+ 5 files changed, 52 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -410,6 +410,7 @@ struct mlx5e_txqsq {
+ struct device *pdev;
+ __be32 mkey_be;
+ unsigned long state;
++ unsigned int hw_mtu;
+ struct hwtstamp_config *tstamp;
+ struct mlx5_clock *clock;
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+@@ -15,15 +15,14 @@
+ #else
+ /* TLS offload requires additional stop_room for:
+ * - a resync SKB.
+- * kTLS offload requires additional stop_room for:
+- * - static params WQE,
+- * - progress params WQE, and
+- * - resync DUMP per frag.
++ * kTLS offload requires fixed additional stop_room for:
++ * - a static params WQE, and a progress params WQE.
++ * The additional MTU-depending room for the resync DUMP WQEs
++ * will be calculated and added in runtime.
+ */
+ #define MLX5E_SQ_TLS_ROOM \
+ (MLX5_SEND_WQE_MAX_WQEBBS + \
+- MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
+- MAX_SKB_FRAGS * MLX5E_KTLS_DUMP_WQEBBS)
++ MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
+ #endif
+
+ #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+@@ -94,7 +94,16 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb
+ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc);
+-
++static inline u8
++mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
++ unsigned int sync_len)
++{
++ /* Given the MTU and sync_len, calculates an upper bound for the
++ * number of WQEBBs needed for the TX resync DUMP WQEs of a record.
++ */
++ return MLX5E_KTLS_DUMP_WQEBBS *
++ (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
++}
+ #else
+
+ static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -373,7 +373,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+ return skb;
+ }
+
+- num_wqebbs = info.nr_frags * MLX5E_KTLS_DUMP_WQEBBS;
++ num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+
+@@ -382,14 +382,40 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+
+ tx_post_resync_params(sq, priv_tx, info.rcd_sn);
+
+- for (; i < info.nr_frags; i++)
+- if (tx_post_resync_dump(sq, &info.frags[i], priv_tx->tisn, !i))
+- goto err_out;
++ for (; i < info.nr_frags; i++) {
++ unsigned int orig_fsz, frag_offset = 0, n = 0;
++ skb_frag_t *f = &info.frags[i];
++
++ orig_fsz = skb_frag_size(f);
++
++ do {
++ bool fence = !(i || frag_offset);
++ unsigned int fsz;
++
++ n++;
++ fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
++ skb_frag_size_set(f, fsz);
++ if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
++ page_ref_add(skb_frag_page(f), n - 1);
++ goto err_out;
++ }
++
++ skb_frag_off_add(f, fsz);
++ frag_offset += fsz;
++ } while (frag_offset < orig_fsz);
++
++ page_ref_add(skb_frag_page(f), n - 1);
++ }
+
+ return skb;
+
+ err_out:
+ for (; i < info.nr_frags; i++)
++ /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
++ * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
++ * released only upon their completions (or in mlx5e_free_txqsq_descs,
++ * if channel closes).
++ */
+ put_page(skb_frag_page(&info.frags[i]));
+
+ dev_kfree_skb_any(skb);
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1128,6 +1128,7 @@ static int mlx5e_alloc_txqsq(struct mlx5
+ sq->txq_ix = txq_ix;
+ sq->uar_map = mdev->mlx5e_res.bfreg.map;
+ sq->min_inline_mode = params->tx_min_inline_mode;
++ sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+ sq->stop_room = MLX5E_SQ_STOP_ROOM;
+ INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
+@@ -1135,10 +1136,14 @@ static int mlx5e_alloc_txqsq(struct mlx5
+ set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
+ if (MLX5_IPSEC_DEV(c->priv->mdev))
+ set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
++#ifdef CONFIG_MLX5_EN_TLS
+ if (mlx5_accel_is_tls_device(c->priv->mdev)) {
+ set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
+- sq->stop_room += MLX5E_SQ_TLS_ROOM;
++ sq->stop_room += MLX5E_SQ_TLS_ROOM +
++ mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
++ TLS_MAX_PAYLOAD_SIZE);
+ }
++#endif
+
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
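
The stop-room bound the patch computes, restated as a sketch: splitting
sync_len bytes into MTU-sized DUMP WQEs needs ceil(sync_len / mtu) WQEs,
and cutting at fragment boundaries can add at most one more per fragment
(the per-WQE WQEBB count below is an illustrative constant):

#define DUMP_WQEBBS 2   /* WQEBBs per DUMP WQE, illustrative */

static unsigned int dumps_num_wqebbs(unsigned int nfrags,
                                     unsigned int sync_len,
                                     unsigned int hw_mtu)
{
        unsigned int chunks = (sync_len + hw_mtu - 1) / hw_mtu; /* DIV_ROUND_UP */

        return DUMP_WQEBBS * (nfrags + chunks);
}
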
diff --git a/patches.suse/net-mlx5e-kTLS-Release-reference-on-DUMPed-fragments.patch b/patches.suse/net-mlx5e-kTLS-Release-reference-on-DUMPed-fragments.patch
new file mode 100644
index 0000000000..cf06f7e0f6
--- /dev/null
+++ b/patches.suse/net-mlx5e-kTLS-Release-reference-on-DUMPed-fragments.patch
@@ -0,0 +1,128 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Wed, 18 Sep 2019 13:45:38 +0300
+Subject: net/mlx5e: kTLS, Release reference on DUMPed fragments in shutdown
+ flow
+Patch-mainline: v5.4-rc6
+Git-commit: 2c559361389b452ca23494080d0c65ab812706c1
+References: jsc#SLE-8464
+
+A call to the kTLS completion handler was missing from the TXQSQ release
+flow. Add it.
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 7 ++-
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 11 ++++-
+ drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 28 ++++++-------
+ 3 files changed, 30 insertions(+), 16 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+@@ -86,7 +86,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb
+ struct mlx5e_tx_wqe **wqe, u16 *pi);
+ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+- struct mlx5e_sq_dma *dma);
++ u32 *dma_fifo_cc);
+
+ #else
+
+@@ -94,6 +94,11 @@ static inline void mlx5e_ktls_build_netd
+ {
+ }
+
++static inline void
++mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
++ struct mlx5e_tx_wqe_info *wi,
++ u32 *dma_fifo_cc) {}
++
+ #endif
+
+ #endif /* __MLX5E_TLS_H__ */
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -303,9 +303,16 @@ tx_post_resync_dump(struct mlx5e_txqsq *
+
+ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+- struct mlx5e_sq_dma *dma)
++ u32 *dma_fifo_cc)
+ {
+- struct mlx5e_sq_stats *stats = sq->stats;
++ struct mlx5e_sq_stats *stats;
++ struct mlx5e_sq_dma *dma;
++
++ if (!wi->resync_dump_frag)
++ return;
++
++ dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
++ stats = sq->stats;
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ __skb_frag_unref(wi->resync_dump_frag);
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -479,14 +479,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *c
+ skb = wi->skb;
+
+ if (unlikely(!skb)) {
+-#ifdef CONFIG_MLX5_EN_TLS
+- if (wi->resync_dump_frag) {
+- struct mlx5e_sq_dma *dma =
+- mlx5e_dma_get(sq, dma_fifo_cc++);
+-
+- mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma);
+- }
+-#endif
++ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
+ sqcc += wi->num_wqebbs;
+ continue;
+ }
+@@ -542,29 +535,38 @@ void mlx5e_free_txqsq_descs(struct mlx5e
+ {
+ struct mlx5e_tx_wqe_info *wi;
+ struct sk_buff *skb;
++ u32 dma_fifo_cc;
++ u16 sqcc;
+ u16 ci;
+ int i;
+
+- while (sq->cc != sq->pc) {
+- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
++ sqcc = sq->cc;
++ dma_fifo_cc = sq->dma_fifo_cc;
++
++ while (sqcc != sq->pc) {
++ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ wi = &sq->db.wqe_info[ci];
+ skb = wi->skb;
+
+ if (!skb) {
+- sq->cc += wi->num_wqebbs;
++ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
++ sqcc += wi->num_wqebbs;
+ continue;
+ }
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma =
+- mlx5e_dma_get(sq, sq->dma_fifo_cc++);
++ mlx5e_dma_get(sq, dma_fifo_cc++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+
+ dev_kfree_skb_any(skb);
+- sq->cc += wi->num_wqebbs;
++ sqcc += wi->num_wqebbs;
+ }
++
++ sq->dma_fifo_cc = dma_fifo_cc;
++ sq->cc = sqcc;
+ }
+
+ #ifdef CONFIG_MLX5_CORE_IPOIB
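
The cursor pattern the rewrite adopts, sketched: walk local copies of the
consumer counters and publish them once at the end, so the queue state is
never observed half-updated (struct and loop body are simplified):

struct sq_state {
        unsigned int cc;                /* descriptor consumer counter */
        unsigned int pc;                /* producer counter */
        unsigned int dma_fifo_cc;       /* DMA fifo consumer counter */
};

static void free_descs(struct sq_state *sq)
{
        unsigned int dma_fifo_cc = sq->dma_fifo_cc;
        unsigned int sqcc = sq->cc;

        while (sqcc != sq->pc) {
                /* ... unmap entries, advancing dma_fifo_cc, and free the
                 * descriptor, advancing sqcc by its WQEBB count ... */
                sqcc++;
        }

        sq->dma_fifo_cc = dma_fifo_cc;  /* publish once, after the walk */
        sq->cc = sqcc;
}
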
diff --git a/patches.suse/net-mlx5e-kTLS-Remove-unneeded-cipher-type-checks.patch b/patches.suse/net-mlx5e-kTLS-Remove-unneeded-cipher-type-checks.patch
new file mode 100644
index 0000000000..51d9df16f8
--- /dev/null
+++ b/patches.suse/net-mlx5e-kTLS-Remove-unneeded-cipher-type-checks.patch
@@ -0,0 +1,41 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Sun, 6 Oct 2019 18:25:17 +0300
+Subject: net/mlx5e: kTLS, Remove unneeded cipher type checks
+Patch-mainline: v5.4-rc6
+Git-commit: ecdc65a3ec5d45725355479d63c23a20f4582104
+References: jsc#SLE-8464
+
+Cipher type is checked upon connection addition.
+No need to recheck it on every TX resync invocation.
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -31,9 +31,6 @@ fill_static_params_ctx(void *ctx, struct
+ char *salt, *rec_seq;
+ u8 tls_version;
+
+- if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
+- return;
+-
+ info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ EXTRACT_INFO_FIELDS;
+
+@@ -243,9 +240,6 @@ tx_post_resync_params(struct mlx5e_txqsq
+ u16 rec_seq_sz;
+ char *rec_seq;
+
+- if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
+- return;
+-
+ info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
diff --git a/patches.suse/net-mlx5e-kTLS-Save-a-copy-of-the-crypto-info.patch b/patches.suse/net-mlx5e-kTLS-Save-a-copy-of-the-crypto-info.patch
new file mode 100644
index 0000000000..db78c03633
--- /dev/null
+++ b/patches.suse/net-mlx5e-kTLS-Save-a-copy-of-the-crypto-info.patch
@@ -0,0 +1,78 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Sun, 22 Sep 2019 14:05:24 +0300
+Subject: net/mlx5e: kTLS, Save a copy of the crypto info
+Patch-mainline: v5.4-rc6
+Git-commit: af11a7a42454b17c77da5fa55b6b6325b11d60e5
+References: jsc#SLE-8464
+
+Do not assume the crypto info is accessible during the
+connection lifetime. Save a copy of it in the private
+TX context.
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 2 +-
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 2 +-
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 8 ++------
+ 3 files changed, 4 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+@@ -38,7 +38,7 @@ static int mlx5e_ktls_add(struct net_dev
+ return -ENOMEM;
+
+ tx_priv->expected_seq = start_offload_tcp_sn;
+- tx_priv->crypto_info = crypto_info;
++ tx_priv->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv);
+
+ /* tc and underlay_qpn values are not in use for tls tis */
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+@@ -44,7 +44,7 @@ enum {
+
+ struct mlx5e_ktls_offload_context_tx {
+ struct tls_offload_context_tx *tx_ctx;
+- struct tls_crypto_info *crypto_info;
++ struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ u32 expected_seq;
+ u32 tisn;
+ u32 key_id;
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -24,14 +24,12 @@ enum {
+ static void
+ fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
+ {
+- struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
+- struct tls12_crypto_info_aes_gcm_128 *info;
++ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
+ char *initial_rn, *gcm_iv;
+ u16 salt_sz, rec_seq_sz;
+ char *salt, *rec_seq;
+ u8 tls_version;
+
+- info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ EXTRACT_INFO_FIELDS;
+
+ gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
+@@ -233,14 +231,12 @@ tx_post_resync_params(struct mlx5e_txqsq
+ struct mlx5e_ktls_offload_context_tx *priv_tx,
+ u64 rcd_sn)
+ {
+- struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
+- struct tls12_crypto_info_aes_gcm_128 *info;
++ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
+ __be64 rn_be = cpu_to_be64(rcd_sn);
+ bool skip_static_post;
+ u16 rec_seq_sz;
+ char *rec_seq;
+
+- info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
+
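
The crux of this fix is storing the crypto parameters by value instead of keeping a pointer into memory owned by the TLS layer. A small user-space sketch of the pointer-versus-embedded-copy distinction, using invented stand-in types rather than the real TLS structures:

    #include <stdio.h>

    /* Invented stand-in for the TLS crypto parameters. */
    struct crypto_info {
        unsigned char rec_seq[8];
        unsigned short version;
    };

    struct tx_ctx {
        /* Embedded copy: stays valid for the connection lifetime,
         * unlike a saved pointer to caller-owned memory. */
        struct crypto_info crypto_info;
    };

    static void ctx_init(struct tx_ctx *ctx, const struct crypto_info *info)
    {
        ctx->crypto_info = *info;   /* copy by value at add time */
    }

    int main(void)
    {
        struct tx_ctx ctx;

        {
            struct crypto_info tmp = { .rec_seq = { 1, 2, 3 },
                                       .version = 0x0303 };
            ctx_init(&ctx, &tmp);
        }   /* tmp is gone; the embedded copy is still usable */

        printf("version=%#x rec_seq[0]=%d\n",
               ctx.crypto_info.version, ctx.crypto_info.rec_seq[0]);
        return 0;
    }
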
diff --git a/patches.suse/net-mlx5e-kTLS-Save-by-value-copy-of-the-record-frag.patch b/patches.suse/net-mlx5e-kTLS-Save-by-value-copy-of-the-record-frag.patch
new file mode 100644
index 0000000000..79365bbccc
--- /dev/null
+++ b/patches.suse/net-mlx5e-kTLS-Save-by-value-copy-of-the-record-frag.patch
@@ -0,0 +1,54 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Wed, 18 Sep 2019 13:57:40 +0300
+Subject: net/mlx5e: kTLS, Save by-value copy of the record frags
+Patch-mainline: v5.4-rc6
+Git-commit: 310d9b9d37220b590909e90e724fc5f346a98775
+References: jsc#SLE-8464
+
+Access the record fragments only under the TLS ctx lock.
+In the resync flow, save a copy of them to be used when
+preparing and posting the required DUMP WQEs.
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -179,7 +179,7 @@ struct tx_sync_info {
+ u64 rcd_sn;
+ s32 sync_len;
+ int nr_frags;
+- skb_frag_t *frags[MAX_SKB_FRAGS];
++ skb_frag_t frags[MAX_SKB_FRAGS];
+ };
+
+ static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+@@ -212,11 +212,11 @@ static bool tx_sync_info_get(struct mlx5
+
+ get_page(skb_frag_page(frag));
+ remaining -= skb_frag_size(frag);
+- info->frags[i++] = frag;
++ info->frags[i++] = *frag;
+ }
+ /* reduce the part which will be sent with the original SKB */
+ if (remaining < 0)
+- skb_frag_size_add(info->frags[i - 1], remaining);
++ skb_frag_size_add(&info->frags[i - 1], remaining);
+ info->nr_frags = i;
+ out:
+ spin_unlock_irqrestore(&tx_ctx->lock, flags);
+@@ -365,7 +365,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+ tx_post_resync_params(sq, priv_tx, info.rcd_sn);
+
+ for (i = 0; i < info.nr_frags; i++)
+- if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i))
++ if (tx_post_resync_dump(sq, &info.frags[i], priv_tx->tisn, !i))
+ goto err_out;
+
+ /* If no dump WQE was sent, we need to have a fence NOP WQE before the
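
The pattern here is snapshotting shared descriptors by value while holding the lock, so they remain stable after the lock is dropped. A user-space sketch under that assumption, with a pthread mutex standing in for the TLS context lock and mock fragment types:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_FRAGS 4

    struct frag { void *page; unsigned int size; };

    struct record {
        struct frag frags[MAX_FRAGS];
        int nr;
        pthread_mutex_t lock;
    };

    struct sync_info { struct frag frags[MAX_FRAGS]; int nr; };

    /* Snapshot the fragments by value while holding the lock; the
     * caller can then build and post its work from the copies after
     * the lock is released. */
    static void sync_info_get(struct record *rec, struct sync_info *info)
    {
        pthread_mutex_lock(&rec->lock);
        for (int i = 0; i < rec->nr; i++)
            info->frags[i] = rec->frags[i];   /* copies, not pointers */
        info->nr = rec->nr;
        pthread_mutex_unlock(&rec->lock);
    }

    int main(void)
    {
        struct record rec = { .nr = 2, .lock = PTHREAD_MUTEX_INITIALIZER };
        struct sync_info info;

        rec.frags[0].size = 100;
        rec.frags[1].size = 42;
        sync_info_get(&rec, &info);
        printf("nr=%d size0=%u\n", info.nr, info.frags[0].size);
        return 0;
    }
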
diff --git a/patches.suse/net-mlx5e-kTLS-Save-only-the-frag-page-to-release-at.patch b/patches.suse/net-mlx5e-kTLS-Save-only-the-frag-page-to-release-at.patch
new file mode 100644
index 0000000000..0bff4c3c83
--- /dev/null
+++ b/patches.suse/net-mlx5e-kTLS-Save-only-the-frag-page-to-release-at.patch
@@ -0,0 +1,121 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Wed, 18 Sep 2019 13:50:32 +0300
+Subject: net/mlx5e: kTLS, Save only the frag page to release at completion
+Patch-mainline: v5.4-rc6
+Git-commit: f45da3716fb2fb09e301a1b6edf200ff343dc06e
+References: jsc#SLE-8464
+
+In TX resync flow where DUMP WQEs are posted, keep a pointer to
+the fragment page to unref it upon completion, instead of saving
+the whole fragment.
+
+In addition, move it to the end of the arguments list in tx_fill_wi().
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 27 ++++++-------
+ 2 files changed, 14 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -345,7 +345,7 @@ struct mlx5e_tx_wqe_info {
+ u8 num_wqebbs;
+ u8 num_dma;
+ #ifdef CONFIG_MLX5_EN_TLS
+- skb_frag_t *resync_dump_frag;
++ struct page *resync_dump_frag_page;
+ #endif
+ };
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -108,16 +108,15 @@ build_progress_params(struct mlx5e_tx_wq
+ }
+
+ static void tx_fill_wi(struct mlx5e_txqsq *sq,
+- u16 pi, u8 num_wqebbs,
+- skb_frag_t *resync_dump_frag,
+- u32 num_bytes)
++ u16 pi, u8 num_wqebbs, u32 num_bytes,
++ struct page *page)
+ {
+ struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
+
+- wi->skb = NULL;
+- wi->num_wqebbs = num_wqebbs;
+- wi->resync_dump_frag = resync_dump_frag;
+- wi->num_bytes = num_bytes;
++ memset(wi, 0, sizeof(*wi));
++ wi->num_wqebbs = num_wqebbs;
++ wi->num_bytes = num_bytes;
++ wi->resync_dump_frag_page = page;
+ }
+
+ void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
+@@ -145,7 +144,7 @@ post_static_params(struct mlx5e_txqsq *s
+
+ umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
+ build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
+- tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
++ tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
+ sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
+ }
+
+@@ -159,7 +158,7 @@ post_progress_params(struct mlx5e_txqsq
+
+ wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
+ build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
+- tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
++ tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
+ sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
+ }
+
+@@ -211,7 +210,7 @@ static bool tx_sync_info_get(struct mlx5
+ while (remaining > 0) {
+ skb_frag_t *frag = &record->frags[i];
+
+- __skb_frag_ref(frag);
++ get_page(skb_frag_page(frag));
+ remaining -= skb_frag_size(frag);
+ info->frags[i++] = frag;
+ }
+@@ -284,7 +283,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *
+ dseg->byte_count = cpu_to_be32(fsz);
+ mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+
+- tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, frag, fsz);
++ tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
+ sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
+
+ return 0;
+@@ -297,14 +296,14 @@ void mlx5e_ktls_tx_handle_resync_dump_co
+ struct mlx5e_sq_stats *stats;
+ struct mlx5e_sq_dma *dma;
+
+- if (!wi->resync_dump_frag)
++ if (!wi->resync_dump_frag_page)
+ return;
+
+ dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
+ stats = sq->stats;
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+- __skb_frag_unref(wi->resync_dump_frag);
++ put_page(wi->resync_dump_frag_page);
+ stats->tls_dump_packets++;
+ stats->tls_dump_bytes += wi->num_bytes;
+ }
+@@ -314,7 +313,7 @@ static void tx_post_fence_nop(struct mlx
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+- tx_fill_wi(sq, pi, 1, NULL, 0);
++ tx_fill_wi(sq, pi, 1, 0, NULL);
+
+ mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
+ }
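
Conceptually, the per-WQE info now holds only a refcounted page, taken at post time and released at completion. A toy user-space model of that lifetime, with a hand-rolled refcount in place of the kernel's struct page:

    #include <stdio.h>

    /* Toy page with a reference count, standing in for struct page. */
    struct page { int refcount; };

    static void get_page(struct page *p) { p->refcount++; }
    static void put_page(struct page *p) { p->refcount--; }

    /* Per-WQE info remembers only the page, not a whole fragment. */
    struct wqe_info { struct page *resync_dump_frag_page; };

    static void post_dump(struct wqe_info *wi, struct page *frag_page)
    {
        get_page(frag_page);                  /* hold until completion */
        wi->resync_dump_frag_page = frag_page;
    }

    static void handle_completion(struct wqe_info *wi)
    {
        if (!wi->resync_dump_frag_page)
            return;                           /* not a dump WQE */
        put_page(wi->resync_dump_frag_page);  /* release at completion */
        wi->resync_dump_frag_page = NULL;
    }

    int main(void)
    {
        struct page pg = { .refcount = 1 };
        struct wqe_info wi = { 0 };

        post_dump(&wi, &pg);
        handle_completion(&wi);
        printf("refcount=%d\n", pg.refcount);  /* back to 1 */
        return 0;
    }
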
diff --git a/patches.suse/net-mlx5e-kTLS-Size-of-a-Dump-WQE-is-fixed.patch b/patches.suse/net-mlx5e-kTLS-Size-of-a-Dump-WQE-is-fixed.patch
new file mode 100644
index 0000000000..30e314d24d
--- /dev/null
+++ b/patches.suse/net-mlx5e-kTLS-Size-of-a-Dump-WQE-is-fixed.patch
@@ -0,0 +1,105 @@
+From: Tariq Toukan <tariqt@mellanox.com>
+Date: Sun, 1 Sep 2019 13:53:26 +0300
+Subject: net/mlx5e: kTLS, Size of a Dump WQE is fixed
+Patch-mainline: v5.4-rc6
+Git-commit: 9b1fef2f23c1141c9936debe633ff16e44c6137b
+References: jsc#SLE-8464
+
+No Eth segment, so no dynamic inline headers.
+The size of a Dump WQE is fixed; use constants and remove the
+unnecessary checks.
+
+Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
+Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
+Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 -
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 9 ++++++
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 17 ++-----------
+ 3 files changed, 12 insertions(+), 16 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+@@ -23,7 +23,7 @@
+ #define MLX5E_SQ_TLS_ROOM \
+ (MLX5_SEND_WQE_MAX_WQEBBS + \
+ MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
+- MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS)
++ MAX_SKB_FRAGS * MLX5E_KTLS_DUMP_WQEBBS)
+ #endif
+
+ #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+@@ -21,7 +21,14 @@
+ MLX5_ST_SZ_BYTES(tls_progress_params))
+ #define MLX5E_KTLS_PROGRESS_WQEBBS \
+ (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
+-#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
++
++struct mlx5e_dump_wqe {
++ struct mlx5_wqe_ctrl_seg ctrl;
++ struct mlx5_wqe_data_seg data;
++};
++
++#define MLX5E_KTLS_DUMP_WQEBBS \
++ (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
+
+ enum {
+ MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -250,11 +250,6 @@ tx_post_resync_params(struct mlx5e_txqsq
+ mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
+ }
+
+-struct mlx5e_dump_wqe {
+- struct mlx5_wqe_ctrl_seg ctrl;
+- struct mlx5_wqe_data_seg data;
+-};
+-
+ static int
+ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
+ {
+@@ -262,7 +257,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *
+ struct mlx5_wqe_data_seg *dseg;
+ struct mlx5e_dump_wqe *wqe;
+ dma_addr_t dma_addr = 0;
+- u8 num_wqebbs;
+ u16 ds_cnt;
+ int fsz;
+ u16 pi;
+@@ -270,7 +264,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *
+ wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
+
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+- num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+
+ cseg = &wqe->ctrl;
+ dseg = &wqe->data;
+@@ -291,12 +284,8 @@ tx_post_resync_dump(struct mlx5e_txqsq *
+ dseg->byte_count = cpu_to_be32(fsz);
+ mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+
+- tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
+- sq->pc += num_wqebbs;
+-
+- WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
+- "unexpected DUMP num_wqebbs, %d > %d",
+- num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);
++ tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, frag, fsz);
++ sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
+
+ return 0;
+ }
+@@ -368,7 +357,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_kt
+ stats->tls_ooo++;
+
+ num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
+- (info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
++ (info.nr_frags ? info.nr_frags * MLX5E_KTLS_DUMP_WQEBBS : 1);
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs))
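
Since the Dump WQE layout is fixed, its size in WQE basic blocks can be a compile-time constant instead of a per-call computation. A small sketch of that idea; the 64-byte basic-block size and the segment layouts below are illustrative, not the hardware definition:

    #include <stdio.h>
    #include <stddef.h>

    #define SEND_WQE_BB 64   /* basic-block size in bytes, assumed */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Illustrative segment layouts; only their fixed sizes matter. */
    struct ctrl_seg { unsigned char raw[16]; };
    struct data_seg { unsigned char raw[16]; };

    /* No Eth segment, so the Dump WQE layout never varies. */
    struct dump_wqe {
        struct ctrl_seg ctrl;
        struct data_seg data;
    };

    /* The WQEBB count is therefore a compile-time constant. */
    #define DUMP_WQEBBS DIV_ROUND_UP(sizeof(struct dump_wqe), SEND_WQE_BB)

    int main(void)
    {
        printf("dump wqe: %zu bytes = %zu wqebb(s)\n",
               sizeof(struct dump_wqe), (size_t)DUMP_WQEBBS);
        return 0;
    }
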
diff --git a/patches.suse/net-netem-correct-the-parent-s-backlog-when-corrupte.patch b/patches.suse/net-netem-correct-the-parent-s-backlog-when-corrupte.patch
new file mode 100644
index 0000000000..2b7f5c887a
--- /dev/null
+++ b/patches.suse/net-netem-correct-the-parent-s-backlog-when-corrupte.patch
@@ -0,0 +1,33 @@
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Fri, 18 Oct 2019 09:16:58 -0700
+Subject: net: netem: correct the parent's backlog when corrupted packet was
+ dropped
+Patch-mainline: v5.4-rc4
+Git-commit: e0ad032e144731a5928f2d75e91c2064ba1a764c
+References: bsc#1154353
+
+If packet corruption failed, we jump to finish_segs and return
+NET_XMIT_SUCCESS. Seeing success will make the parent qdisc
+increment its backlog; that's incorrect - we need to return
+NET_XMIT_DROP.
+
+Fixes: 6071bd1aa13e ("netem: Segment GSO packets on enqueue")
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/sched/sch_netem.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -616,6 +616,8 @@ finish_segs:
+ }
+ /* Parent qdiscs accounted for 1 skb of size @prev_len */
+ qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
++ } else if (!skb) {
++ return NET_XMIT_DROP;
+ }
+ return NET_XMIT_SUCCESS;
+ }
diff --git a/patches.suse/net-netem-fix-error-path-for-corrupted-GSO-frames.patch b/patches.suse/net-netem-fix-error-path-for-corrupted-GSO-frames.patch
new file mode 100644
index 0000000000..f08e676211
--- /dev/null
+++ b/patches.suse/net-netem-fix-error-path-for-corrupted-GSO-frames.patch
@@ -0,0 +1,67 @@
+From: Jakub Kicinski <jakub.kicinski@netronome.com>
+Date: Fri, 18 Oct 2019 09:16:57 -0700
+Subject: net: netem: fix error path for corrupted GSO frames
+Patch-mainline: v5.4-rc4
+Git-commit: a7fa12d15855904aff1716e1fc723c03ba38c5cc
+References: bsc#1154353
+
+To corrupt a GSO frame we first perform segmentation. We then
+proceed using the first segment instead of the full GSO skb and
+requeue the rest of the segments as separate packets.
+
+If there are any issues with processing the first segment we
+still want to process the rest, therefore we jump to the
+finish_segs label.
+
+Commit 177b8007463c ("net: netem: fix backlog accounting for
+corrupted GSO frames") started using the pointer to the first
+segment in the "rest of segments processing", but as mentioned
+above, the first segment may have already been freed at this point.
+
+Backlog corrections for parent qdiscs have to be adjusted.
+
+Fixes: 177b8007463c ("net: netem: fix backlog accounting for corrupted GSO frames")
+Reported-by: kbuild test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/sched/sch_netem.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -509,6 +509,7 @@ static int netem_enqueue(struct sk_buff
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb)) {
+ qdisc_drop(skb, sch, to_free);
++ skb = NULL;
+ goto finish_segs;
+ }
+
+@@ -593,9 +594,10 @@ static int netem_enqueue(struct sk_buff
+ finish_segs:
+ if (segs) {
+ unsigned int len, last_len;
+- int nb = 0;
++ int nb;
+
+- len = skb->len;
++ len = skb ? skb->len : 0;
++ nb = skb ? 1 : 0;
+
+ while (segs) {
+ skb2 = segs->next;
+@@ -612,7 +614,8 @@ finish_segs:
+ }
+ segs = skb2;
+ }
+- qdisc_tree_reduce_backlog(sch, -nb, prev_len - len);
++ /* Parent qdiscs accounted for 1 skb of size @prev_len */
++ qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
+ }
+ return NET_XMIT_SUCCESS;
+ }
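
The accounting rule both netem fixes converge on: the parent qdisc already counted one skb of size prev_len, so after segmentation the child must report only the delta in packets and bytes, counting the first segment only if it survived. A toy model of that bookkeeping (invented helper, not the qdisc code):

    #include <stdio.h>

    struct seg { int len; int enqueued; };

    /* The parent already accounted for 1 skb of size prev_len; after
     * segmentation, report only the delta of what actually got queued.
     * first_ok says whether the first segment survived (it may have
     * been dropped during checksum fixup, as in the fix above). */
    static void account(const struct seg *segs, int n, int prev_len,
                        int first_ok, int first_len)
    {
        int nb = first_ok ? 1 : 0;
        int len = first_ok ? first_len : 0;

        for (int i = 0; i < n; i++) {
            if (!segs[i].enqueued)
                continue;
            nb++;
            len += segs[i].len;
        }
        printf("backlog delta: %+d pkts, %+d bytes\n",
               nb - 1, len - prev_len);
    }

    int main(void)
    {
        struct seg rest[2] = { { .len = 700, .enqueued = 1 },
                               { .len = 300, .enqueued = 0 } };

        /* first segment dropped: zero survivors from it */
        account(rest, 2, 1500, 0, 0);
        return 0;
    }
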
diff --git a/patches.suse/net-phylink-Fix-phylink_dbg-macro.patch b/patches.suse/net-phylink-Fix-phylink_dbg-macro.patch
new file mode 100644
index 0000000000..fbe9f1f665
--- /dev/null
+++ b/patches.suse/net-phylink-Fix-phylink_dbg-macro.patch
@@ -0,0 +1,47 @@
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Thu, 31 Oct 2019 15:42:26 -0700
+Subject: net: phylink: Fix phylink_dbg() macro
+Patch-mainline: v5.4-rc6
+Git-commit: 9d68db5092c5fac99fccfdeab3f04df0b27d1762
+References: bsc#1154353
+
+The phylink_dbg() macro does not follow dynamic debug or defined(DEBUG)
+and as a result, it spams the kernel log since a PR_DEBUG level is
+currently used. Fix it to be defined appropriately depending on whether
+CONFIG_DYNAMIC_DEBUG or defined(DEBUG) is set.
+
+Fixes: 17091180b152 ("net: phylink: Add phylink_{printk, err, warn, info, dbg} macros")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/phy/phylink.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -87,8 +87,24 @@ struct phylink {
+ phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__)
+ #define phylink_info(pl, fmt, ...) \
+ phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__)
++#if defined(CONFIG_DYNAMIC_DEBUG)
+ #define phylink_dbg(pl, fmt, ...) \
++do { \
++ if ((pl)->config->type == PHYLINK_NETDEV) \
++ netdev_dbg((pl)->netdev, fmt, ##__VA_ARGS__); \
++ else if ((pl)->config->type == PHYLINK_DEV) \
++ dev_dbg((pl)->dev, fmt, ##__VA_ARGS__); \
++} while (0)
++#elif defined(DEBUG)
++#define phylink_dbg(pl, fmt, ...) \
+ phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__)
++#else
++#define phylink_dbg(pl, fmt, ...) \
++({ \
++ if (0) \
++ phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__); \
++})
++#endif
+
+ /**
+ * phylink_set_port_modes() - set the port type modes in the ethtool mask
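
The fallback branch uses a well-known kernel idiom: an "if (0)" wrapper that keeps the format string and arguments type-checked while compiling the call away. A user-space sketch of the same three-way macro structure, with a made-up printer instead of phylink_printk (GNU C extensions, as in the original):

    #include <stdio.h>

    /* Stand-in printer; the real code routes through phylink_printk(). */
    #define my_printk(lvl, fmt, ...) \
        fprintf(stderr, "<%s> " fmt "\n", lvl, ##__VA_ARGS__)

    #if defined(DEBUG)
    #define my_dbg(fmt, ...) my_printk("dbg", fmt, ##__VA_ARGS__)
    #else
    /* Compiled out, but arguments are still type-checked. */
    #define my_dbg(fmt, ...)                              \
    ({                                                    \
        if (0)                                            \
            my_printk("dbg", fmt, ##__VA_ARGS__);         \
    })
    #endif

    int main(void)
    {
        my_dbg("value=%d", 42);     /* silent unless built with -DDEBUG */
        my_printk("info", "always printed");
        return 0;
    }
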
diff --git a/patches.suse/net-remove-unnecessary-variables-and-callback.patch b/patches.suse/net-remove-unnecessary-variables-and-callback.patch
new file mode 100644
index 0000000000..b34ab772f0
--- /dev/null
+++ b/patches.suse/net-remove-unnecessary-variables-and-callback.patch
@@ -0,0 +1,412 @@
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Mon, 21 Oct 2019 18:47:58 +0000
+Subject: net: remove unnecessary variables and callback
+Patch-mainline: v5.4-rc6
+Git-commit: f3b0a18bb6cb07a9abb75e21b1f08eeaefa78e81
+References: bsc#1154353
+
+This patch removes the variables and the callback that are related to the
+nested device structure.
+Devices that can be nested have their own nest_level variable that
+represents the depth of nested devices.
+In the previous patch, new {lower/upper}_level variables were added to
+replace the old private nest_level variable.
+So, this patch removes all 'nest_level' variables.
+
+In order to avoid a lockdep warning, ->ndo_get_lock_subclass() was added
+to get the lockdep subclass value, which is actually the lower nesting
+depth value. But now, drivers use a dynamic lockdep key instead of the
+subclass to avoid the warning.
+So, this patch removes the ->ndo_get_lock_subclass() callback.
+
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/bonding/bond_alb.c | 2 +-
+ drivers/net/bonding/bond_main.c | 15 ---------------
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +-
+ drivers/net/macsec.c | 9 ---------
+ drivers/net/macvlan.c | 7 -------
+ include/linux/if_macvlan.h | 1 -
+ include/linux/if_vlan.h | 11 -----------
+ include/linux/netdevice.h | 12 ------------
+ include/net/bonding.h | 1 -
+ net/8021q/vlan.c | 1 -
+ net/8021q/vlan_dev.c | 6 ------
+ net/core/dev.c | 19 -------------------
+ net/core/dev_addr_lists.c | 12 ++++++------
+ net/smc/smc_core.c | 2 +-
+ net/smc/smc_pnet.c | 2 +-
+ 15 files changed, 10 insertions(+), 92 deletions(-)
+
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -952,7 +952,7 @@ static int alb_upper_dev_walk(struct net
+ struct bond_vlan_tag *tags;
+
+ if (is_vlan_dev(upper) &&
+- bond->nest_level == vlan_get_encap_level(upper) - 1) {
++ bond->dev->lower_level == upper->lower_level - 1) {
+ if (upper->addr_assign_type == NET_ADDR_STOLEN) {
+ alb_send_lp_vid(slave, mac_addr,
+ vlan_dev_vlan_proto(upper),
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1733,8 +1733,6 @@ int bond_enslave(struct net_device *bond
+ goto err_upper_unlink;
+ }
+
+- bond->nest_level = dev_get_nest_level(bond_dev) + 1;
+-
+ /* If the mode uses primary, then the following is handled by
+ * bond_change_active_slave().
+ */
+@@ -1957,9 +1955,6 @@ static int __bond_release_one(struct net
+ if (!bond_has_slaves(bond)) {
+ bond_set_carrier(bond);
+ eth_hw_addr_random(bond_dev);
+- bond->nest_level = SINGLE_DEPTH_NESTING;
+- } else {
+- bond->nest_level = dev_get_nest_level(bond_dev) + 1;
+ }
+
+ unblock_netpoll_tx();
+@@ -3444,13 +3439,6 @@ static void bond_fold_stats(struct rtnl_
+ }
+ }
+
+-static int bond_get_nest_level(struct net_device *bond_dev)
+-{
+- struct bonding *bond = netdev_priv(bond_dev);
+-
+- return bond->nest_level;
+-}
+-
+ static void bond_get_stats(struct net_device *bond_dev,
+ struct rtnl_link_stats64 *stats)
+ {
+@@ -4270,7 +4258,6 @@ static const struct net_device_ops bond_
+ .ndo_neigh_setup = bond_neigh_setup,
+ .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
+- .ndo_get_lock_subclass = bond_get_nest_level,
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_setup = bond_netpoll_setup,
+ .ndo_netpoll_cleanup = bond_netpoll_cleanup,
+@@ -4769,8 +4756,6 @@ static int bond_init(struct net_device *
+ if (!bond->wq)
+ return -ENOMEM;
+
+- bond->nest_level = SINGLE_DEPTH_NESTING;
+-
+ spin_lock_init(&bond->mode_lock);
+ spin_lock_init(&bond->stats_lock);
+ lockdep_register_key(&bond->stats_lock_key);
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -3160,7 +3160,7 @@ static int add_vlan_pop_action(struct ml
+ struct mlx5_esw_flow_attr *attr,
+ u32 *action)
+ {
+- int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
++ int nest_level = attr->parse_attr->filter_dev->lower_level;
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_POP,
+ };
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -267,7 +267,6 @@ struct macsec_dev {
+ struct pcpu_secy_stats __percpu *stats;
+ struct list_head secys;
+ struct gro_cells gro_cells;
+- unsigned int nest_level;
+ };
+
+ /**
+@@ -2957,11 +2956,6 @@ static int macsec_get_iflink(const struc
+ return macsec_priv(dev)->real_dev->ifindex;
+ }
+
+-static int macsec_get_nest_level(struct net_device *dev)
+-{
+- return macsec_priv(dev)->nest_level;
+-}
+-
+ static const struct net_device_ops macsec_netdev_ops = {
+ .ndo_init = macsec_dev_init,
+ .ndo_uninit = macsec_dev_uninit,
+@@ -2975,7 +2969,6 @@ static const struct net_device_ops macse
+ .ndo_start_xmit = macsec_start_xmit,
+ .ndo_get_stats64 = macsec_get_stats64,
+ .ndo_get_iflink = macsec_get_iflink,
+- .ndo_get_lock_subclass = macsec_get_nest_level,
+ };
+
+ static const struct device_type macsec_type = {
+@@ -3262,8 +3255,6 @@ static int macsec_newlink(struct net *ne
+
+ dev_hold(real_dev);
+
+- macsec->nest_level = dev_get_nest_level(real_dev) + 1;
+-
+ err = netdev_upper_dev_link(real_dev, dev, extack);
+ if (err < 0)
+ goto unregister;
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -867,11 +867,6 @@ static int macvlan_do_ioctl(struct net_d
+ #define MACVLAN_STATE_MASK \
+ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+
+-static int macvlan_get_nest_level(struct net_device *dev)
+-{
+- return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+-}
+-
+ static int macvlan_init(struct net_device *dev)
+ {
+ struct macvlan_dev *vlan = netdev_priv(dev);
+@@ -1149,7 +1144,6 @@ static const struct net_device_ops macvl
+ .ndo_fdb_add = macvlan_fdb_add,
+ .ndo_fdb_del = macvlan_fdb_del,
+ .ndo_fdb_dump = ndo_dflt_fdb_dump,
+- .ndo_get_lock_subclass = macvlan_get_nest_level,
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = macvlan_dev_poll_controller,
+ .ndo_netpoll_setup = macvlan_dev_netpoll_setup,
+@@ -1433,7 +1427,6 @@ int macvlan_common_newlink(struct net *s
+ vlan->dev = dev;
+ vlan->port = port;
+ vlan->set_features = MACVLAN_FEATURES;
+- vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
+
+ vlan->mode = MACVLAN_MODE_VEPA;
+ if (data && data[IFLA_MACVLAN_MODE])
+--- a/include/linux/if_macvlan.h
++++ b/include/linux/if_macvlan.h
+@@ -29,7 +29,6 @@ struct macvlan_dev {
+ netdev_features_t set_features;
+ enum macvlan_mode mode;
+ u16 flags;
+- int nest_level;
+ unsigned int macaddr_count;
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -182,7 +182,6 @@ struct vlan_dev_priv {
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+ #endif
+- unsigned int nest_level;
+ };
+
+ static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
+@@ -221,11 +220,6 @@ extern void vlan_vids_del_by_dev(struct
+
+ extern bool vlan_uses_dev(const struct net_device *dev);
+
+-static inline int vlan_get_encap_level(struct net_device *dev)
+-{
+- BUG_ON(!is_vlan_dev(dev));
+- return vlan_dev_priv(dev)->nest_level;
+-}
+ #else
+ static inline struct net_device *
+ __vlan_find_dev_deep_rcu(struct net_device *real_dev,
+@@ -295,11 +289,6 @@ static inline bool vlan_uses_dev(const s
+ {
+ return false;
+ }
+-static inline int vlan_get_encap_level(struct net_device *dev)
+-{
+- BUG();
+- return 0;
+-}
+ #endif
+
+ /**
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1421,7 +1421,6 @@ struct net_device_ops {
+ void (*ndo_dfwd_del_station)(struct net_device *pdev,
+ void *priv);
+
+- int (*ndo_get_lock_subclass)(struct net_device *dev);
+ int (*ndo_set_tx_maxrate)(struct net_device *dev,
+ int queue_index,
+ u32 maxrate);
+@@ -4050,16 +4049,6 @@ static inline void netif_addr_lock(struc
+ spin_lock(&dev->addr_list_lock);
+ }
+
+-static inline void netif_addr_lock_nested(struct net_device *dev)
+-{
+- int subclass = SINGLE_DEPTH_NESTING;
+-
+- if (dev->netdev_ops->ndo_get_lock_subclass)
+- subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
+-
+- spin_lock_nested(&dev->addr_list_lock, subclass);
+-}
+-
+ static inline void netif_addr_lock_bh(struct net_device *dev)
+ {
+ spin_lock_bh(&dev->addr_list_lock);
+@@ -4344,7 +4333,6 @@ void netdev_lower_state_changed(struct n
+ extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
+ void netdev_rss_key_fill(void *buffer, size_t len);
+
+-int dev_get_nest_level(struct net_device *dev);
+ int skb_checksum_help(struct sk_buff *skb);
+ int skb_crc32c_csum_help(struct sk_buff *skb);
+ int skb_csum_hwoffload_help(struct sk_buff *skb,
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -203,7 +203,6 @@ struct bonding {
+ struct slave __rcu *primary_slave;
+ struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
+ bool force_primary;
+- u32 nest_level;
+ s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ struct slave *);
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -172,7 +172,6 @@ int register_vlan_dev(struct net_device
+ if (err < 0)
+ goto out_uninit_mvrp;
+
+- vlan->nest_level = dev_get_nest_level(real_dev) + 1;
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto out_uninit_mvrp;
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -489,11 +489,6 @@ static void vlan_dev_set_rx_mode(struct
+ dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+ }
+
+-static int vlan_dev_get_lock_subclass(struct net_device *dev)
+-{
+- return vlan_dev_priv(dev)->nest_level;
+-}
+-
+ static const struct header_ops vlan_header_ops = {
+ .create = vlan_dev_hard_header,
+ .parse = eth_header_parse,
+@@ -785,7 +780,6 @@ static const struct net_device_ops vlan_
+ .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
+ #endif
+ .ndo_fix_features = vlan_dev_fix_features,
+- .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
+ .ndo_get_iflink = vlan_dev_get_iflink,
+ };
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -7615,25 +7615,6 @@ void *netdev_lower_dev_get_private(struc
+ EXPORT_SYMBOL(netdev_lower_dev_get_private);
+
+
+-int dev_get_nest_level(struct net_device *dev)
+-{
+- struct net_device *lower = NULL;
+- struct list_head *iter;
+- int max_nest = -1;
+- int nest;
+-
+- ASSERT_RTNL();
+-
+- netdev_for_each_lower_dev(dev, lower, iter) {
+- nest = dev_get_nest_level(lower);
+- if (max_nest < nest)
+- max_nest = nest;
+- }
+-
+- return max_nest + 1;
+-}
+-EXPORT_SYMBOL(dev_get_nest_level);
+-
+ /**
+ * netdev_lower_change - Dispatch event about lower device state change
+ * @lower_dev: device
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -637,7 +637,7 @@ int dev_uc_sync(struct net_device *to, s
+ if (to->addr_len != from->addr_len)
+ return -EINVAL;
+
+- netif_addr_lock_nested(to);
++ netif_addr_lock(to);
+ err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
+ if (!err)
+ __dev_set_rx_mode(to);
+@@ -667,7 +667,7 @@ int dev_uc_sync_multiple(struct net_devi
+ if (to->addr_len != from->addr_len)
+ return -EINVAL;
+
+- netif_addr_lock_nested(to);
++ netif_addr_lock(to);
+ err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
+ if (!err)
+ __dev_set_rx_mode(to);
+@@ -691,7 +691,7 @@ void dev_uc_unsync(struct net_device *to
+ return;
+
+ netif_addr_lock_bh(from);
+- netif_addr_lock_nested(to);
++ netif_addr_lock(to);
+ __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
+ __dev_set_rx_mode(to);
+ netif_addr_unlock(to);
+@@ -858,7 +858,7 @@ int dev_mc_sync(struct net_device *to, s
+ if (to->addr_len != from->addr_len)
+ return -EINVAL;
+
+- netif_addr_lock_nested(to);
++ netif_addr_lock(to);
+ err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
+ if (!err)
+ __dev_set_rx_mode(to);
+@@ -888,7 +888,7 @@ int dev_mc_sync_multiple(struct net_devi
+ if (to->addr_len != from->addr_len)
+ return -EINVAL;
+
+- netif_addr_lock_nested(to);
++ netif_addr_lock(to);
+ err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
+ if (!err)
+ __dev_set_rx_mode(to);
+@@ -912,7 +912,7 @@ void dev_mc_unsync(struct net_device *to
+ return;
+
+ netif_addr_lock_bh(from);
+- netif_addr_lock_nested(to);
++ netif_addr_lock(to);
+ __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
+ __dev_set_rx_mode(to);
+ netif_addr_unlock(to);
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -558,7 +558,7 @@ int smc_vlan_by_tcpsk(struct socket *clc
+ }
+
+ rtnl_lock();
+- nest_lvl = dev_get_nest_level(ndev);
++ nest_lvl = ndev->lower_level;
+ for (i = 0; i < nest_lvl; i++) {
+ struct list_head *lower = &ndev->adj_list.lower;
+
+--- a/net/smc/smc_pnet.c
++++ b/net/smc/smc_pnet.c
+@@ -718,7 +718,7 @@ static struct net_device *pnet_find_base
+ int i, nest_lvl;
+
+ rtnl_lock();
+- nest_lvl = dev_get_nest_level(ndev);
++ nest_lvl = ndev->lower_level;
+ for (i = 0; i < nest_lvl; i++) {
+ struct list_head *lower = &ndev->adj_list.lower;
+
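
The replacement model is that the core maintains each device's lower_level as devices are stacked, so nobody needs the removed recursive dev_get_nest_level(). A toy sketch of maintaining such a depth field on link, with invented types:

    #include <stdio.h>

    #define MAX_LOWER 4

    /* Invented device type: the core keeps lower_level up to date as
     * devices are stacked, so no recursive recomputation is needed. */
    struct dev_like {
        const char *name;
        int lower_level;                 /* 1 for an unstacked device */
        struct dev_like *lower[MAX_LOWER];
        int n_lower;
    };

    static void link_upper(struct dev_like *upper, struct dev_like *lower)
    {
        upper->lower[upper->n_lower++] = lower;
        if (upper->lower_level <= lower->lower_level)
            upper->lower_level = lower->lower_level + 1;
    }

    int main(void)
    {
        struct dev_like eth  = { .name = "eth0",      .lower_level = 1 };
        struct dev_like bond = { .name = "bond0",     .lower_level = 1 };
        struct dev_like vlan = { .name = "bond0.100", .lower_level = 1 };

        link_upper(&bond, &eth);
        link_upper(&vlan, &bond);
        printf("%s lower_level=%d\n", vlan.name, vlan.lower_level);
        return 0;
    }
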
diff --git a/patches.suse/net-reorder-struct-net-fields-to-avoid-false-sharing.patch b/patches.suse/net-reorder-struct-net-fields-to-avoid-false-sharing.patch
new file mode 100644
index 0000000000..59a6845cdf
--- /dev/null
+++ b/patches.suse/net-reorder-struct-net-fields-to-avoid-false-sharing.patch
@@ -0,0 +1,108 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 18 Oct 2019 15:20:05 -0700
+Subject: net: reorder 'struct net' fields to avoid false sharing
+Patch-mainline: v5.4-rc4
+Git-commit: 2a06b8982f8f2f40d03a3daf634676386bd84dbc
+References: bsc#1154353
+
+Intel test robot reported a ~7% regression on TCP_CRR tests
+that they bisected to the cited commit.
+
+Indeed, every time a new TCP socket is created or deleted,
+the atomic counter net->count is touched (via get_net(net)
+and put_net(net) calls).
+
+So CPUs might have to reload a contended cache line in
+net_hash_mix(net) calls.
+
+We need to reorder 'struct net' fields to move @hash_mix
+into a read-mostly cache line.
+
+We move into the first cache line the fields that can be
+dirtied often.
+
+We probably will have to address in a followup patch
+the __randomize_layout that was added in linux-4.13,
+since this might break our placement choices.
+
+Fixes: 355b98553789 ("netns: provide pure entropy for net_hash_mix()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/net_namespace.h | 25 +++++++++++++++++--------
+ 1 file changed, 17 insertions(+), 8 deletions(-)
+
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -52,6 +52,9 @@ struct bpf_prog;
+ #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
+
+ struct net {
++ /* First cache line can be often dirtied.
++ * Do not place here read-mostly fields.
++ */
+ refcount_t passive; /* To decide when the network
+ * namespace should be freed.
+ */
+@@ -60,7 +63,13 @@ struct net {
+ */
+ spinlock_t rules_mod_lock;
+
+- u32 hash_mix;
++ unsigned int dev_unreg_count;
++
++ unsigned int dev_base_seq; /* protected by rtnl_mutex */
++ int ifindex;
++
++ spinlock_t nsid_lock;
++ atomic_t fnhe_genid;
+
+ struct list_head list; /* list of network namespaces */
+ struct list_head exit_list; /* To linked to call pernet exit
+@@ -76,11 +85,11 @@ struct net {
+ #endif
+ struct user_namespace *user_ns; /* Owning user namespace */
+ struct ucounts *ucounts;
+- spinlock_t nsid_lock;
+ struct idr netns_ids;
+
+ struct ns_common ns;
+
++ struct list_head dev_base_head;
+ struct proc_dir_entry *proc_net;
+ struct proc_dir_entry *proc_net_stat;
+
+@@ -93,17 +102,18 @@ struct net {
+
+ struct uevent_sock *uevent_sock; /* uevent socket */
+
+- struct list_head dev_base_head;
+ struct hlist_head *dev_name_head;
+ struct hlist_head *dev_index_head;
+- unsigned int dev_base_seq; /* protected by rtnl_mutex */
+- int ifindex;
+- unsigned int dev_unreg_count;
++ /* Note that @hash_mix can be read millions times per second,
++ * it is critical that it is on a read_mostly cache line.
++ */
++ u32 hash_mix;
++
++ struct net_device *loopback_dev; /* The loopback */
+
+ /* core fib_rules */
+ struct list_head rules_ops;
+
+- struct net_device *loopback_dev; /* The loopback */
+ struct netns_core core;
+ struct netns_mib mib;
+ struct netns_packet packet;
+@@ -168,7 +178,6 @@ struct net {
+ struct netns_xdp xdp;
+ #endif
+ struct sock *diag_nlsk;
+- atomic_t fnhe_genid;
+ } __randomize_layout;
+
+ #include <linux/seq_file_net.h>
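
The general technique is to group frequently dirtied fields on one cache line and keep read-mostly fields such as the hash seed elsewhere, so writers do not keep invalidating the readers' line. A minimal illustration of the layout idea; the 64-byte line size and explicit padding are assumptions for the sketch, not the kernel's mechanism:

    #include <stdio.h>
    #include <stddef.h>

    #define CACHELINE 64    /* assumed line size for the sketch */

    struct netns_like {
        /* hot: frequently dirtied by socket create/destroy */
        int count;
        int dev_unreg_count;

        /* padding pushes the read-mostly field to its own line,
         * so the writers above don't keep invalidating it */
        char pad[CACHELINE - 2 * sizeof(int)];

        /* read-mostly: read millions of times per second */
        unsigned int hash_mix;
    };

    int main(void)
    {
        printf("count @%zu, hash_mix @%zu\n",
               offsetof(struct netns_like, count),
               offsetof(struct netns_like, hash_mix));
        return 0;
    }
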
diff --git a/patches.suse/net-rtnetlink-fix-a-typo-fbd-fdb.patch b/patches.suse/net-rtnetlink-fix-a-typo-fbd-fdb.patch
new file mode 100644
index 0000000000..5507177a14
--- /dev/null
+++ b/patches.suse/net-rtnetlink-fix-a-typo-fbd-fdb.patch
@@ -0,0 +1,30 @@
+From: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+Date: Tue, 29 Oct 2019 13:59:32 +0200
+Subject: net: rtnetlink: fix a typo fbd -> fdb
+Patch-mainline: v5.4-rc6
+Git-commit: 8b73018fe44521c1cf59d7bac53624c87d3f10e2
+References: bsc#1154353
+
+A simple typo fix in the nl error message (fbd -> fdb).
+
+CC: David Ahern <dsahern@gmail.com>
+Fixes: 8c6e137fbc7f ("rtnetlink: Update rtnl_fdb_dump for strict data checking")
+Signed-off-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+Reviewed-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/core/rtnetlink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3917,7 +3917,7 @@ static int valid_fdb_dump_strict(const s
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
+ ndm->ndm_flags || ndm->ndm_type) {
+- NL_SET_ERR_MSG(extack, "Invalid values in header for fbd dump request");
++ NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
+ return -EINVAL;
+ }
+
diff --git a/patches.suse/net-sch_generic-Use-pfifo_fast-as-fallback-scheduler.patch b/patches.suse/net-sch_generic-Use-pfifo_fast-as-fallback-scheduler.patch
new file mode 100644
index 0000000000..a0b6c00036
--- /dev/null
+++ b/patches.suse/net-sch_generic-Use-pfifo_fast-as-fallback-scheduler.patch
@@ -0,0 +1,70 @@
+From: Vincent Prince <vincent.prince.fr@gmail.com>
+Date: Tue, 22 Oct 2019 17:09:50 +0200
+Subject: net: sch_generic: Use pfifo_fast as fallback scheduler for CAN
+ hardware
+Patch-mainline: v5.4-rc6
+Git-commit: fa784f2ac00e19edc0d6eb77ac791bc1eb366d7e
+References: bsc#1154353
+
+There is networking hardware that isn't based on Ethernet for layers 1 and 2.
+
+For example CAN.
+
+CAN is a multi-master serial bus standard for connecting Electronic Control
+Units [ECUs] also known as nodes. A frame on the CAN bus carries up to 8 bytes
+of payload. Frame corruption is detected by a CRC. However, frame loss due to
+corruption is possible, though it is a quite unusual phenomenon.
+
+While fq_codel works great for TCP/IP, it doesn't for CAN. There are a lot of
+legacy protocols on top of CAN which are not built with flow control or high
+CAN frame drop rates in mind.
+
+When using fq_codel, as soon as the queue reaches a certain delay-based length,
+skbs from the head of the queue are silently dropped - silently meaning that
+user space using a send() or similar syscall doesn't get an error. However,
+TCP's flow control algorithm will detect the dropped packets and adjust the
+bandwidth accordingly.
+
+When using fq_codel and sending raw frames over CAN, which is the common use
+case, user space thinks the frame has been sent without problems, because
+send() returned without an error. pfifo_fast will drop skbs if the queue
+length exceeds the maximum, but with this scheduler the skbs at the tail are
+dropped and an error (-ENOBUFS) is propagated to user space, so that user
+space can slow down its frame generation.
+
+On distributions where fq_codel is made the default via CONFIG_DEFAULT_NET_SCH
+at compile time, or set as the default at runtime with the sysctl
+net.core.default_qdisc (see [1]), we get a bad user experience. In my test case
+with pfifo_fast, I can transfer thousands of millions of CAN frames without a
+frame drop. On the other hand, with fq_codel there is more than one lost CAN
+frame per thousand frames.
+
+As pointed out fq_codel is not suited for CAN hardware, so this patch changes
+attach_one_default_qdisc() to use pfifo_fast for "ARPHRD_CAN" network devices.
+
+During transition of a netdev from down to up state the default queuing
+discipline is attached by attach_default_qdiscs() with the help of
+attach_one_default_qdisc(). This patch modifies attach_one_default_qdisc() to
+attach the pfifo_fast (pfifo_fast_ops) if the network device type is
+"ARPHRD_CAN".
+
+[1] https://github.com/systemd/systemd/issues/9194
+
+Signed-off-by: Vincent Prince <vincent.prince.fr@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/sched/sch_generic.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -1038,6 +1038,8 @@ static void attach_one_default_qdisc(str
+
+ if (dev->priv_flags & IFF_NO_QUEUE)
+ ops = &noqueue_qdisc_ops;
++ else if (dev->type == ARPHRD_CAN)
++ ops = &pfifo_fast_ops;
+
+ qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
+ if (!qdisc) {
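
The behavioral difference the commit message describes, tail-drop with an error versus silent head-drop, can be reduced to a few lines. A toy tail-drop queue in the spirit of pfifo_fast; the helper and sizes are invented:

    #include <stdio.h>
    #include <errno.h>

    #define QLEN 4

    /* Toy tail-drop queue: a full queue rejects the new packet so the
     * sender sees an error, instead of silently dropping an older
     * packet from the head as an AQM such as fq_codel would. */
    struct fifo { int pkts[QLEN]; int n; };

    static int fifo_enqueue(struct fifo *q, int pkt)
    {
        if (q->n == QLEN)
            return -ENOBUFS;    /* propagated up to the send() caller */
        q->pkts[q->n++] = pkt;
        return 0;
    }

    int main(void)
    {
        struct fifo q = { .n = 0 };

        for (int i = 0; i < 6; i++)
            if (fifo_enqueue(&q, i) < 0)
                printf("frame %d rejected, sender can back off\n", i);
        return 0;
    }
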
diff --git a/patches.suse/net-silence-KCSAN-warnings-about-sk-sk_backlog.len-r.patch b/patches.suse/net-silence-KCSAN-warnings-about-sk-sk_backlog.len-r.patch
new file mode 100644
index 0000000000..cac62f53de
--- /dev/null
+++ b/patches.suse/net-silence-KCSAN-warnings-about-sk-sk_backlog.len-r.patch
@@ -0,0 +1,123 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 9 Oct 2019 15:41:03 -0700
+Subject: net: silence KCSAN warnings about sk->sk_backlog.len reads
+Patch-mainline: v5.4-rc4
+Git-commit: 70c2655849a25431f31b505a07fe0c861e5e41fb
+References: bsc#1154353
+
+sk->sk_backlog.len can be written by BH handlers, and read
+from process contexts in a lockless way.
+
+Note the write side should also use WRITE_ONCE() or a variant.
+We need some agreement about the best way to do this.
+
+syzbot reported :
+
+BUG: KCSAN: data-race in tcp_add_backlog / tcp_grow_window.isra.0
+
+write to 0xffff88812665f32c of 4 bytes by interrupt on cpu 1:
+ sk_add_backlog include/net/sock.h:934 [inline]
+ tcp_add_backlog+0x4a0/0xcc0 net/ipv4/tcp_ipv4.c:1737
+ tcp_v4_rcv+0x1aba/0x1bf0 net/ipv4/tcp_ipv4.c:1925
+ ip_protocol_deliver_rcu+0x51/0x470 net/ipv4/ip_input.c:204
+ ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252
+ dst_input include/net/dst.h:442 [inline]
+ ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5004
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5118
+ netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5208
+ napi_skb_finish net/core/dev.c:5671 [inline]
+ napi_gro_receive+0x28f/0x330 net/core/dev.c:5704
+ receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061
+ virtnet_receive drivers/net/virtio_net.c:1323 [inline]
+ virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428
+ napi_poll net/core/dev.c:6352 [inline]
+ net_rx_action+0x3ae/0xa50 net/core/dev.c:6418
+
+read to 0xffff88812665f32c of 4 bytes by task 7292 on cpu 0:
+ tcp_space include/net/tcp.h:1373 [inline]
+ tcp_grow_window.isra.0+0x6b/0x480 net/ipv4/tcp_input.c:413
+ tcp_event_data_recv+0x68f/0x990 net/ipv4/tcp_input.c:717
+ tcp_rcv_established+0xbfe/0xf50 net/ipv4/tcp_input.c:5618
+ tcp_v4_do_rcv+0x381/0x4e0 net/ipv4/tcp_ipv4.c:1542
+ sk_backlog_rcv include/net/sock.h:945 [inline]
+ __release_sock+0x135/0x1e0 net/core/sock.c:2427
+ release_sock+0x61/0x160 net/core/sock.c:2943
+ tcp_recvmsg+0x63b/0x1a30 net/ipv4/tcp.c:2181
+ inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838
+ sock_recvmsg_nosec net/socket.c:871 [inline]
+ sock_recvmsg net/socket.c:889 [inline]
+ sock_recvmsg+0x92/0xb0 net/socket.c:885
+ sock_read_iter+0x15f/0x1e0 net/socket.c:967
+ call_read_iter include/linux/fs.h:1864 [inline]
+ new_sync_read+0x389/0x4f0 fs/read_write.c:414
+ __vfs_read+0xb1/0xc0 fs/read_write.c:427
+ vfs_read fs/read_write.c:461 [inline]
+ vfs_read+0x143/0x2c0 fs/read_write.c:446
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 7292 Comm: syz-fuzzer Not tainted 5.3.0+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/tcp.h | 3 ++-
+ net/core/sock.c | 2 +-
+ net/sctp/diag.c | 2 +-
+ net/tipc/socket.c | 2 +-
+ 4 files changed, 5 insertions(+), 4 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1380,7 +1380,8 @@ static inline int tcp_win_from_space(con
+ /* Note: caller must be prepared to deal with negative returns */
+ static inline int tcp_space(const struct sock *sk)
+ {
+- return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len -
++ return tcp_win_from_space(sk, sk->sk_rcvbuf -
++ READ_ONCE(sk->sk_backlog.len) -
+ atomic_read(&sk->sk_rmem_alloc));
+ }
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3210,7 +3210,7 @@ void sk_get_meminfo(const struct sock *s
+ mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+ mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+ mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+- mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
++ mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
+ mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+ }
+
+--- a/net/sctp/diag.c
++++ b/net/sctp/diag.c
+@@ -175,7 +175,7 @@ static int inet_sctp_diag_fill(struct so
+ mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+ mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+ mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+- mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
++ mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
+ mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+
+ if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -3790,7 +3790,7 @@ int tipc_sk_dump(struct sock *sk, u16 dq
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
+ i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
+- i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len);
++ i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
+
+ if (dqueues & TIPC_DUMP_SK_SNDQ) {
+ i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
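
READ_ONCE()/WRITE_ONCE() document (and enforce) single, untorn accesses for values shared without a lock. A user-space approximation of the annotation pair and how a lockless reader uses it; the volatile-cast definitions below mimic, but are not, the kernel's implementation:

    #include <stdio.h>

    /* Rough user-space stand-ins for the kernel annotations: a volatile
     * access forces exactly one load/store at this spot. */
    #define READ_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    struct sock_like { int backlog_len; int rcvbuf; };

    /* lockless reader: sample the shared field once, compute on the copy */
    static int space_left(struct sock_like *sk)
    {
        return sk->rcvbuf - READ_ONCE(sk->backlog_len);
    }

    /* writer side (BH context in the kernel) */
    static void add_backlog(struct sock_like *sk, int len)
    {
        WRITE_ONCE(sk->backlog_len, sk->backlog_len + len);
    }

    int main(void)
    {
        struct sock_like sk = { .backlog_len = 0, .rcvbuf = 4096 };

        add_backlog(&sk, 512);
        printf("space=%d\n", space_left(&sk));
        return 0;
    }
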
diff --git a/patches.suse/net-silence-KCSAN-warnings-around-sk_add_backlog-cal.patch b/patches.suse/net-silence-KCSAN-warnings-around-sk_add_backlog-cal.patch
new file mode 100644
index 0000000000..fcd6f23e59
--- /dev/null
+++ b/patches.suse/net-silence-KCSAN-warnings-around-sk_add_backlog-cal.patch
@@ -0,0 +1,174 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 9 Oct 2019 15:21:13 -0700
+Subject: net: silence KCSAN warnings around sk_add_backlog() calls
+Patch-mainline: v5.4-rc4
+Git-commit: 8265792bf8871acc2d00fd03883d830e2249d395
+References: bsc#1154353
+
+sk_add_backlog() callers usually read sk->sk_rcvbuf without
+owning the socket lock. This means the sk_rcvbuf value can
+be changed by other CPUs, and KCSAN complains.
+
+Add READ_ONCE() annotations to document the lockless nature
+of these reads.
+
+Note that writes over sk_rcvbuf should also use WRITE_ONCE(),
+but this will be done in separate patches to ease stable
+backports (if we decide this is relevant for stable trees).
+
+BUG: KCSAN: data-race in tcp_add_backlog / tcp_recvmsg
+
+write to 0xffff88812ab369f8 of 8 bytes by interrupt on cpu 1:
+ __sk_add_backlog include/net/sock.h:902 [inline]
+ sk_add_backlog include/net/sock.h:933 [inline]
+ tcp_add_backlog+0x45a/0xcc0 net/ipv4/tcp_ipv4.c:1737
+ tcp_v4_rcv+0x1aba/0x1bf0 net/ipv4/tcp_ipv4.c:1925
+ ip_protocol_deliver_rcu+0x51/0x470 net/ipv4/ip_input.c:204
+ ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252
+ dst_input include/net/dst.h:442 [inline]
+ ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5004
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5118
+ netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5208
+ napi_skb_finish net/core/dev.c:5671 [inline]
+ napi_gro_receive+0x28f/0x330 net/core/dev.c:5704
+ receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061
+ virtnet_receive drivers/net/virtio_net.c:1323 [inline]
+ virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428
+ napi_poll net/core/dev.c:6352 [inline]
+ net_rx_action+0x3ae/0xa50 net/core/dev.c:6418
+
+read to 0xffff88812ab369f8 of 8 bytes by task 7271 on cpu 0:
+ tcp_recvmsg+0x470/0x1a30 net/ipv4/tcp.c:2047
+ inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838
+ sock_recvmsg_nosec net/socket.c:871 [inline]
+ sock_recvmsg net/socket.c:889 [inline]
+ sock_recvmsg+0x92/0xb0 net/socket.c:885
+ sock_read_iter+0x15f/0x1e0 net/socket.c:967
+ call_read_iter include/linux/fs.h:1864 [inline]
+ new_sync_read+0x389/0x4f0 fs/read_write.c:414
+ __vfs_read+0xb1/0xc0 fs/read_write.c:427
+ vfs_read fs/read_write.c:461 [inline]
+ vfs_read+0x143/0x2c0 fs/read_write.c:446
+ ksys_read+0xd5/0x1b0 fs/read_write.c:587
+ __do_sys_read fs/read_write.c:597 [inline]
+ __se_sys_read fs/read_write.c:595 [inline]
+ __x64_sys_read+0x4c/0x60 fs/read_write.c:595
+ do_syscall_64+0xcf/0x2f0 arch/x86/entry/common.c:296
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 7271 Comm: syz-fuzzer Not tainted 5.3.0+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/core/sock.c | 2 +-
+ net/ipv4/tcp_ipv4.c | 2 +-
+ net/llc/llc_conn.c | 2 +-
+ net/sctp/input.c | 6 +++---
+ net/tipc/socket.c | 6 +++---
+ net/x25/x25_dev.c | 2 +-
+ 6 files changed, 10 insertions(+), 10 deletions(-)
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -522,7 +522,7 @@ int __sk_receive_skb(struct sock *sk, st
+ rc = sk_backlog_rcv(sk, skb);
+
+ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+- } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
++ } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
+ bh_unlock_sock(sk);
+ atomic_inc(&sk->sk_drops);
+ goto discard_and_relse;
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1640,7 +1640,7 @@ int tcp_v4_early_demux(struct sk_buff *s
+
+ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ {
+- u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
++ u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
+ struct skb_shared_info *shinfo;
+ const struct tcphdr *th;
+ struct tcphdr *thtail;
+--- a/net/llc/llc_conn.c
++++ b/net/llc/llc_conn.c
+@@ -813,7 +813,7 @@ void llc_conn_handler(struct llc_sap *sa
+ else {
+ dprintk("%s: adding to backlog...\n", __func__);
+ llc_set_backlog_type(skb, LLC_PACKET);
+- if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
++ if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
+ goto drop_unlock;
+ }
+ out:
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -322,7 +322,7 @@ int sctp_backlog_rcv(struct sock *sk, st
+ bh_lock_sock(sk);
+
+ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+- if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
++ if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
+ sctp_chunk_free(chunk);
+ else
+ backloged = 1;
+@@ -337,7 +337,7 @@ int sctp_backlog_rcv(struct sock *sk, st
+ return 0;
+ } else {
+ if (!sctp_newsk_ready(sk)) {
+- if (!sk_add_backlog(sk, skb, sk->sk_rcvbuf))
++ if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
+ return 0;
+ sctp_chunk_free(chunk);
+ } else {
+@@ -364,7 +364,7 @@ static int sctp_add_backlog(struct sock
+ struct sctp_ep_common *rcvr = chunk->rcvr;
+ int ret;
+
+- ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
++ ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
+ if (!ret) {
+ /* Hold the assoc/ep while hanging on the backlog queue.
+ * This way, we know structures we need will not disappear
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2119,13 +2119,13 @@ static unsigned int rcvbuf_limit(struct
+ struct tipc_msg *hdr = buf_msg(skb);
+
+ if (unlikely(msg_in_group(hdr)))
+- return sk->sk_rcvbuf;
++ return READ_ONCE(sk->sk_rcvbuf);
+
+ if (unlikely(!msg_connected(hdr)))
+- return sk->sk_rcvbuf << msg_importance(hdr);
++ return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
+
+ if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
+- return sk->sk_rcvbuf;
++ return READ_ONCE(sk->sk_rcvbuf);
+
+ return FLOWCTL_MSG_LIM;
+ }
+--- a/net/x25/x25_dev.c
++++ b/net/x25/x25_dev.c
+@@ -55,7 +55,7 @@ static int x25_receive_data(struct sk_bu
+ if (!sock_owned_by_user(sk)) {
+ queued = x25_process_rx_frame(sk, skb);
+ } else {
+- queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
++ queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
+ }
+ bh_unlock_sock(sk);
+ sock_put(sk);
diff --git a/patches.suse/net-use-skb_queue_empty_lockless-in-busy-poll-contex.patch b/patches.suse/net-use-skb_queue_empty_lockless-in-busy-poll-contex.patch
new file mode 100644
index 0000000000..dddaceed24
--- /dev/null
+++ b/patches.suse/net-use-skb_queue_empty_lockless-in-busy-poll-contex.patch
@@ -0,0 +1,79 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 23 Oct 2019 22:44:51 -0700
+Subject: net: use skb_queue_empty_lockless() in busy poll contexts
+Patch-mainline: v5.4-rc6
+Git-commit: 3f926af3f4d688e2e11e7f8ed04e277a14d4d4a4
+References: bsc#1154353
+
+Busy polling usually runs without locks.
+Let's use skb_queue_empty_lockless() instead of skb_queue_empty().
+
+Also use READ_ONCE() in __skb_try_recv_datagram() to address
+a similar potential problem.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chtls/chtls_io.c | 2 +-
+ net/core/datagram.c | 2 +-
+ net/core/sock.c | 2 +-
+ net/ipv4/tcp.c | 2 +-
+ net/sctp/socket.c | 2 +-
+ 5 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/crypto/chelsio/chtls/chtls_io.c
++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
+@@ -1703,7 +1703,7 @@ int chtls_recvmsg(struct sock *sk, struc
+ return peekmsg(sk, msg, len, nonblock, flags);
+
+ if (sk_can_busy_loop(sk) &&
+- skb_queue_empty(&sk->sk_receive_queue) &&
++ skb_queue_empty_lockless(&sk->sk_receive_queue) &&
+ sk->sk_state == TCP_ESTABLISHED)
+ sk_busy_loop(sk, nonblock);
+
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -278,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(
+ break;
+
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
+- } while (sk->sk_receive_queue.prev != *last);
++ } while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
+
+ error = -EAGAIN;
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3600,7 +3600,7 @@ bool sk_busy_loop_end(void *p, unsigned
+ {
+ struct sock *sk = p;
+
+- return !skb_queue_empty(&sk->sk_receive_queue) ||
++ return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ sk_busy_loop_timeout(sk, start_time);
+ }
+ EXPORT_SYMBOL(sk_busy_loop_end);
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1963,7 +1963,7 @@ int tcp_recvmsg(struct sock *sk, struct
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return inet_recv_error(sk, msg, len, addr_len);
+
+- if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
++ if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
+ (sk->sk_state == TCP_ESTABLISHED))
+ sk_busy_loop(sk, nonblock);
+
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -8724,7 +8724,7 @@ struct sk_buff *sctp_skb_recv_datagram(s
+ if (sk_can_busy_loop(sk)) {
+ sk_busy_loop(sk, noblock);
+
+- if (!skb_queue_empty(&sk->sk_receive_queue))
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ continue;
+ }
+
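+
skb_queue_empty_lockless() differs from skb_queue_empty() only in reading
the queue head pointer with READ_ONCE(), which makes the emptiness check
safe to issue without holding the queue lock. A rough userspace model of
the idea; the types and macro below are simplified stand-ins, not the
sk_buff_head or kernel implementation:

    /* Sketch only: illustrates the lockless-emptiness check. */
    #include <stdbool.h>
    #include <stdio.h>

    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    struct node { struct node *next, *prev; };
    struct queue { struct node head; };   /* empty: head.next == &head */

    static bool queue_empty_lockless(struct queue *q)
    {
            /* A single marked load: racing with enqueue/dequeue is
             * fine; the result is a hint the caller re-validates under
             * the lock before acting on it. */
            return READ_ONCE(q->head.next) == &q->head;
    }

    int main(void)
    {
            struct queue q = { .head = { &q.head, &q.head } };

            printf("empty: %d\n", queue_empty_lockless(&q));
            return 0;
    }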
diff --git a/patches.suse/net-use-skb_queue_empty_lockless-in-poll-handlers.patch b/patches.suse/net-use-skb_queue_empty_lockless-in-poll-handlers.patch
new file mode 100644
index 0000000000..7c1c873c00
--- /dev/null
+++ b/patches.suse/net-use-skb_queue_empty_lockless-in-poll-handlers.patch
@@ -0,0 +1,246 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 23 Oct 2019 22:44:50 -0700
+Subject: net: use skb_queue_empty_lockless() in poll() handlers
+Patch-mainline: v5.4-rc6
+Git-commit: 3ef7cf57c72f32f61e97f8fa401bc39ea1f1a5d4
+References: bsc#1154353
+
+Many poll() handlers are lockless. Using skb_queue_empty_lockless()
+instead of skb_queue_empty() is more appropriate.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/isdn/capi/capi.c | 2 +-
+ net/atm/common.c | 2 +-
+ net/bluetooth/af_bluetooth.c | 4 ++--
+ net/caif/caif_socket.c | 2 +-
+ net/core/datagram.c | 4 ++--
+ net/decnet/af_decnet.c | 2 +-
+ net/ipv4/tcp.c | 2 +-
+ net/ipv4/udp.c | 2 +-
+ net/nfc/llcp_sock.c | 4 ++--
+ net/phonet/socket.c | 4 ++--
+ net/sctp/socket.c | 4 ++--
+ net/tipc/socket.c | 4 ++--
+ net/unix/af_unix.c | 6 +++---
+ net/vmw_vsock/af_vsock.c | 2 +-
+ 14 files changed, 22 insertions(+), 22 deletions(-)
+
+--- a/drivers/isdn/capi/capi.c
++++ b/drivers/isdn/capi/capi.c
+@@ -744,7 +744,7 @@ capi_poll(struct file *file, poll_table
+
+ poll_wait(file, &(cdev->recvwait), wait);
+ mask = EPOLLOUT | EPOLLWRNORM;
+- if (!skb_queue_empty(&cdev->recvqueue))
++ if (!skb_queue_empty_lockless(&cdev->recvqueue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ return mask;
+ }
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -668,7 +668,7 @@ __poll_t vcc_poll(struct file *file, str
+ mask |= EPOLLHUP;
+
+ /* readable? */
+- if (!skb_queue_empty(&sk->sk_receive_queue))
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* writable? */
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -460,7 +460,7 @@ __poll_t bt_sock_poll(struct file *file,
+ if (sk->sk_state == BT_LISTEN)
+ return bt_accept_poll(sk);
+
+- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+
+@@ -470,7 +470,7 @@ __poll_t bt_sock_poll(struct file *file,
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ mask |= EPOLLHUP;
+
+- if (!skb_queue_empty(&sk->sk_receive_queue))
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ if (sk->sk_state == BT_CLOSED)
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -953,7 +953,7 @@ static __poll_t caif_poll(struct file *f
+ mask |= EPOLLRDHUP;
+
+ /* readable? */
+- if (!skb_queue_empty(&sk->sk_receive_queue) ||
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -767,7 +767,7 @@ __poll_t datagram_poll(struct file *file
+ mask = 0;
+
+ /* exceptional events? */
+- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+
+@@ -777,7 +777,7 @@ __poll_t datagram_poll(struct file *file
+ mask |= EPOLLHUP;
+
+ /* readable? */
+- if (!skb_queue_empty(&sk->sk_receive_queue))
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* Connection-based need to check for termination and startup */
+--- a/net/decnet/af_decnet.c
++++ b/net/decnet/af_decnet.c
+@@ -1205,7 +1205,7 @@ static __poll_t dn_poll(struct file *fil
+ struct dn_scp *scp = DN_SK(sk);
+ __poll_t mask = datagram_poll(file, sock, wait);
+
+- if (!skb_queue_empty(&scp->other_receive_queue))
++ if (!skb_queue_empty_lockless(&scp->other_receive_queue))
+ mask |= EPOLLRDBAND;
+
+ return mask;
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -584,7 +584,7 @@ __poll_t tcp_poll(struct file *file, str
+ }
+ /* This barrier is coupled with smp_wmb() in tcp_reset() */
+ smp_rmb();
+- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ mask |= EPOLLERR;
+
+ return mask;
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2712,7 +2712,7 @@ __poll_t udp_poll(struct file *file, str
+ __poll_t mask = datagram_poll(file, sock, wait);
+ struct sock *sk = sock->sk;
+
+- if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
++ if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* Check for false positives due to checksum errors */
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -554,11 +554,11 @@ static __poll_t llcp_sock_poll(struct fi
+ if (sk->sk_state == LLCP_LISTEN)
+ return llcp_accept_poll(sk);
+
+- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+
+- if (!skb_queue_empty(&sk->sk_receive_queue))
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ if (sk->sk_state == LLCP_CLOSED)
+--- a/net/phonet/socket.c
++++ b/net/phonet/socket.c
+@@ -338,9 +338,9 @@ static __poll_t pn_socket_poll(struct fi
+
+ if (sk->sk_state == TCP_CLOSE)
+ return EPOLLERR;
+- if (!skb_queue_empty(&sk->sk_receive_queue))
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+- if (!skb_queue_empty(&pn->ctrlreq_queue))
++ if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
+ mask |= EPOLLPRI;
+ if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
+ return EPOLLHUP;
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -8329,7 +8329,7 @@ __poll_t sctp_poll(struct file *file, st
+ mask = 0;
+
+ /* Is there any exceptional events? */
+- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+@@ -8338,7 +8338,7 @@ __poll_t sctp_poll(struct file *file, st
+ mask |= EPOLLHUP;
+
+ /* Is it readable? Reconsider this code with TCP-style support. */
+- if (!skb_queue_empty(&sk->sk_receive_queue))
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* The association is either gone or not ready. */
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -740,7 +740,7 @@ static __poll_t tipc_poll(struct file *f
+ /* fall through */
+ case TIPC_LISTEN:
+ case TIPC_CONNECTING:
+- if (!skb_queue_empty(&sk->sk_receive_queue))
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ revents |= EPOLLIN | EPOLLRDNORM;
+ break;
+ case TIPC_OPEN:
+@@ -748,7 +748,7 @@ static __poll_t tipc_poll(struct file *f
+ revents |= EPOLLOUT;
+ if (!tipc_sk_type_connectionless(sk))
+ break;
+- if (skb_queue_empty(&sk->sk_receive_queue))
++ if (skb_queue_empty_lockless(&sk->sk_receive_queue))
+ break;
+ revents |= EPOLLIN | EPOLLRDNORM;
+ break;
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2599,7 +2599,7 @@ static __poll_t unix_poll(struct file *f
+ mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
+
+ /* readable? */
+- if (!skb_queue_empty(&sk->sk_receive_queue))
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* Connection-based need to check for termination and startup */
+@@ -2628,7 +2628,7 @@ static __poll_t unix_dgram_poll(struct f
+ mask = 0;
+
+ /* exceptional events? */
+- if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++ if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
+
+@@ -2638,7 +2638,7 @@ static __poll_t unix_dgram_poll(struct f
+ mask |= EPOLLHUP;
+
+ /* readable? */
+- if (!skb_queue_empty(&sk->sk_receive_queue))
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* Connection-based need to check for termination and startup */
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -870,7 +870,7 @@ static __poll_t vsock_poll(struct file *
+ * the queue and write as long as the socket isn't shutdown for
+ * sending.
+ */
+- if (!skb_queue_empty(&sk->sk_receive_queue) ||
++ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN)) {
+ mask |= EPOLLIN | EPOLLRDNORM;
+ }
diff --git a/patches.suse/netdevsim-Fix-use-after-free-during-device-dismantle.patch b/patches.suse/netdevsim-Fix-use-after-free-during-device-dismantle.patch
new file mode 100644
index 0000000000..b665573bba
--- /dev/null
+++ b/patches.suse/netdevsim-Fix-use-after-free-during-device-dismantle.patch
@@ -0,0 +1,151 @@
+From: Ido Schimmel <idosch@mellanox.com>
+Date: Thu, 31 Oct 2019 18:20:30 +0200
+Subject: netdevsim: Fix use-after-free during device dismantle
+Patch-mainline: v5.4-rc6
+Git-commit: 6d6f0383b697f004c65823c2b64240912f18515d
+References: bsc#1154353
+
+Commit da58f90f11f5 ("netdevsim: Add devlink-trap support") added a
+delayed work item to netdevsim that periodically iterates over the
+registered netdevsim ports and reports various packet traps via devlink.
+
+While the delayed work takes the 'port_list_lock' mutex to protect
+against concurrent addition / deletion of ports, during device creation
+/ dismantle ports are added / deleted without this lock, which can
+result in a use-after-free [1].
+
+Fix this by making sure that the ports list is always modified under the
+lock.
+
+[1]
+[ 59.205543] ==================================================================
+[ 59.207748] BUG: KASAN: use-after-free in nsim_dev_trap_report_work+0xa67/0xad0
+[ 59.210247] Read of size 8 at addr ffff8883cbdd3398 by task kworker/3:1/38
+[ 59.212584]
+[ 59.213148] CPU: 3 PID: 38 Comm: kworker/3:1 Not tainted 5.4.0-rc3-custom-16119-ge6abb5f0261e #2013
+[ 59.215896] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20180724_192412-buildhw-07.phx2.fedoraproject.org-1.fc29 04/01/2014
+[ 59.218384] Workqueue: events nsim_dev_trap_report_work
+[ 59.219428] Call Trace:
+[ 59.219924] dump_stack+0xa9/0x10e
+[ 59.220623] print_address_description.constprop.4+0x21/0x340
+[ 59.221976] ? vprintk_func+0x66/0x240
+[ 59.222752] __kasan_report.cold.8+0x78/0x91
+[ 59.223602] ? nsim_dev_trap_report_work+0xa67/0xad0
+[ 59.224603] kasan_report+0xe/0x20
+[ 59.225296] nsim_dev_trap_report_work+0xa67/0xad0
+[ 59.226435] ? rcu_read_lock_sched_held+0xaf/0xe0
+[ 59.227512] ? trace_event_raw_event_rcu_quiescent_state_report+0x360/0x360
+[ 59.228851] process_one_work+0x98f/0x1760
+[ 59.229684] ? pwq_dec_nr_in_flight+0x330/0x330
+[ 59.230656] worker_thread+0x91/0xc40
+[ 59.231587] ? process_one_work+0x1760/0x1760
+[ 59.232451] kthread+0x34a/0x410
+[ 59.233104] ? __kthread_queue_delayed_work+0x240/0x240
+[ 59.234141] ret_from_fork+0x3a/0x50
+[ 59.234982]
+[ 59.235371] Allocated by task 187:
+[ 59.236189] save_stack+0x19/0x80
+[ 59.236853] __kasan_kmalloc.constprop.5+0xc1/0xd0
+[ 59.237822] kmem_cache_alloc_trace+0x14c/0x380
+[ 59.238769] __nsim_dev_port_add+0xaf/0x5c0
+[ 59.239627] nsim_dev_probe+0x4fc/0x1140
+[ 59.240550] really_probe+0x264/0xc00
+[ 59.241418] driver_probe_device+0x208/0x2e0
+[ 59.242255] __device_attach_driver+0x215/0x2d0
+[ 59.243150] bus_for_each_drv+0x154/0x1d0
+[ 59.243944] __device_attach+0x1ba/0x2b0
+[ 59.244923] bus_probe_device+0x1dd/0x290
+[ 59.245805] device_add+0xbac/0x1550
+[ 59.246528] new_device_store+0x1f4/0x400
+[ 59.247306] bus_attr_store+0x7b/0xa0
+[ 59.248047] sysfs_kf_write+0x10f/0x170
+[ 59.248941] kernfs_fop_write+0x283/0x430
+[ 59.249843] __vfs_write+0x81/0x100
+[ 59.250546] vfs_write+0x1ce/0x510
+[ 59.251190] ksys_write+0x104/0x200
+[ 59.251873] do_syscall_64+0xa4/0x4e0
+[ 59.252642] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+[ 59.253837]
+[ 59.254203] Freed by task 187:
+[ 59.254811] save_stack+0x19/0x80
+[ 59.255463] __kasan_slab_free+0x125/0x170
+[ 59.256265] kfree+0x100/0x440
+[ 59.256870] nsim_dev_remove+0x98/0x100
+[ 59.257651] nsim_bus_remove+0x16/0x20
+[ 59.258382] device_release_driver_internal+0x20b/0x4d0
+[ 59.259588] bus_remove_device+0x2e9/0x5a0
+[ 59.260551] device_del+0x410/0xad0
+[ 59.263777] device_unregister+0x26/0xc0
+[ 59.264616] nsim_bus_dev_del+0x16/0x60
+[ 59.265381] del_device_store+0x2d6/0x3c0
+[ 59.266295] bus_attr_store+0x7b/0xa0
+[ 59.267192] sysfs_kf_write+0x10f/0x170
+[ 59.267960] kernfs_fop_write+0x283/0x430
+[ 59.268800] __vfs_write+0x81/0x100
+[ 59.269551] vfs_write+0x1ce/0x510
+[ 59.270252] ksys_write+0x104/0x200
+[ 59.270910] do_syscall_64+0xa4/0x4e0
+[ 59.271680] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+[ 59.272812]
+[ 59.273211] The buggy address belongs to the object at ffff8883cbdd3200
+[ 59.273211] which belongs to the cache kmalloc-512 of size 512
+[ 59.275838] The buggy address is located 408 bytes inside of
+[ 59.275838] 512-byte region [ffff8883cbdd3200, ffff8883cbdd3400)
+[ 59.278151] The buggy address belongs to the page:
+[ 59.279215] page:ffffea000f2f7400 refcount:1 mapcount:0 mapping:ffff8883ecc0ce00 index:0x0 compound_mapcount: 0
+[ 59.281449] flags: 0x200000000010200(slab|head)
+[ 59.282356] raw: 0200000000010200 ffffea000f2f3a08 ffffea000f2fd608 ffff8883ecc0ce00
+[ 59.283949] raw: 0000000000000000 0000000000150015 00000001ffffffff 0000000000000000
+[ 59.285608] page dumped because: kasan: bad access detected
+[ 59.286981]
+[ 59.287337] Memory state around the buggy address:
+[ 59.288310] ffff8883cbdd3280: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 59.289763] ffff8883cbdd3300: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 59.291452] >ffff8883cbdd3380: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 59.292945] ^
+[ 59.293815] ffff8883cbdd3400: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 59.295220] ffff8883cbdd3480: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 59.296872] ==================================================================
+
+Fixes: da58f90f11f5 ("netdevsim: Add devlink-trap support")
+Signed-off-by: Ido Schimmel <idosch@mellanox.com>
+Reported-by: syzbot+9ed8f68ab30761f3678e@syzkaller.appspotmail.com
+Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/netdevsim/dev.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -806,9 +806,11 @@ static void nsim_dev_port_del_all(struct
+ {
+ struct nsim_dev_port *nsim_dev_port, *tmp;
+
++ mutex_lock(&nsim_dev->port_list_lock);
+ list_for_each_entry_safe(nsim_dev_port, tmp,
+ &nsim_dev->port_list, list)
+ __nsim_dev_port_del(nsim_dev_port);
++ mutex_unlock(&nsim_dev->port_list_lock);
+ }
+
+ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+@@ -822,14 +824,17 @@ int nsim_dev_probe(struct nsim_bus_dev *
+ return PTR_ERR(nsim_dev);
+ dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+
++ mutex_lock(&nsim_dev->port_list_lock);
+ for (i = 0; i < nsim_bus_dev->port_count; i++) {
+ err = __nsim_dev_port_add(nsim_dev, i);
+ if (err)
+ goto err_port_del_all;
+ }
++ mutex_unlock(&nsim_dev->port_list_lock);
+ return 0;
+
+ err_port_del_all:
++ mutex_unlock(&nsim_dev->port_list_lock);
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_destroy(nsim_dev);
+ return err;
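+
The fix works because every reader and writer of nsim_dev->port_list now
serializes on the same mutex; the delayed work can no longer walk the list
while probe/remove is splicing nodes in or out. A compact pthread-based
model of that rule; the names mirror the fix but the code is illustrative,
not the netdevsim implementation:

    /* Sketch only: one mutex for all users of the shared list. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct port {
            struct port *next;
            int id;
    };

    static pthread_mutex_t port_list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct port *port_list;

    static void port_add(int id)            /* probe path */
    {
            struct port *p = malloc(sizeof(*p));

            p->id = id;
            pthread_mutex_lock(&port_list_lock);
            p->next = port_list;            /* publish under the lock */
            port_list = p;
            pthread_mutex_unlock(&port_list_lock);
    }

    static void trap_report_work(void)      /* delayed-work path */
    {
            pthread_mutex_lock(&port_list_lock);
            for (struct port *p = port_list; p; p = p->next)
                    printf("port %d\n", p->id);  /* cannot race a del */
            pthread_mutex_unlock(&port_list_lock);
    }

    int main(void)
    {
            port_add(0);
            port_add(1);
            trap_report_work();
            return 0;
    }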
diff --git a/patches.suse/netfilter-conntrack-avoid-possible-false-sharing.patch b/patches.suse/netfilter-conntrack-avoid-possible-false-sharing.patch
new file mode 100644
index 0000000000..da82b6d751
--- /dev/null
+++ b/patches.suse/netfilter-conntrack-avoid-possible-false-sharing.patch
@@ -0,0 +1,88 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 9 Oct 2019 09:19:13 -0700
+Subject: netfilter: conntrack: avoid possible false sharing
+Patch-mainline: v5.4-rc4
+Git-commit: e37542ba111f3974dc622ae0a21c1787318de500
+References: bsc#1154353
+
+As hinted by KCSAN, we need at least one READ_ONCE()
+to prevent a compiler optimization.
+
+More details at:
+https://github.com/google/ktsan/wiki/READ_ONCE-and-WRITE_ONCE#it-may-improve-performance
+
+syzbot report:
+BUG: KCSAN: data-race in __nf_ct_refresh_acct / __nf_ct_refresh_acct
+
+read to 0xffff888123eb4f08 of 4 bytes by interrupt on cpu 0:
+ __nf_ct_refresh_acct+0xd4/0x1b0 net/netfilter/nf_conntrack_core.c:1796
+ nf_ct_refresh_acct include/net/netfilter/nf_conntrack.h:201 [inline]
+ nf_conntrack_tcp_packet+0xd40/0x3390 net/netfilter/nf_conntrack_proto_tcp.c:1161
+ nf_conntrack_handle_packet net/netfilter/nf_conntrack_core.c:1633 [inline]
+ nf_conntrack_in+0x410/0xaa0 net/netfilter/nf_conntrack_core.c:1727
+ ipv4_conntrack_in+0x27/0x40 net/netfilter/nf_conntrack_proto.c:178
+ nf_hook_entry_hookfn include/linux/netfilter.h:135 [inline]
+ nf_hook_slow+0x83/0x160 net/netfilter/core.c:512
+ nf_hook include/linux/netfilter.h:260 [inline]
+ NF_HOOK include/linux/netfilter.h:303 [inline]
+ ip_rcv+0x12f/0x1a0 net/ipv4/ip_input.c:523
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5004
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5118
+ netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5208
+ napi_skb_finish net/core/dev.c:5671 [inline]
+ napi_gro_receive+0x28f/0x330 net/core/dev.c:5704
+ receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061
+ virtnet_receive drivers/net/virtio_net.c:1323 [inline]
+ virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428
+ napi_poll net/core/dev.c:6352 [inline]
+ net_rx_action+0x3ae/0xa50 net/core/dev.c:6418
+ __do_softirq+0x115/0x33f kernel/softirq.c:292
+
+write to 0xffff888123eb4f08 of 4 bytes by task 7191 on cpu 1:
+ __nf_ct_refresh_acct+0xfb/0x1b0 net/netfilter/nf_conntrack_core.c:1797
+ nf_ct_refresh_acct include/net/netfilter/nf_conntrack.h:201 [inline]
+ nf_conntrack_tcp_packet+0xd40/0x3390 net/netfilter/nf_conntrack_proto_tcp.c:1161
+ nf_conntrack_handle_packet net/netfilter/nf_conntrack_core.c:1633 [inline]
+ nf_conntrack_in+0x410/0xaa0 net/netfilter/nf_conntrack_core.c:1727
+ ipv4_conntrack_local+0xbe/0x130 net/netfilter/nf_conntrack_proto.c:200
+ nf_hook_entry_hookfn include/linux/netfilter.h:135 [inline]
+ nf_hook_slow+0x83/0x160 net/netfilter/core.c:512
+ nf_hook include/linux/netfilter.h:260 [inline]
+ __ip_local_out+0x1f7/0x2b0 net/ipv4/ip_output.c:114
+ ip_local_out+0x31/0x90 net/ipv4/ip_output.c:123
+ __ip_queue_xmit+0x3a8/0xa40 net/ipv4/ip_output.c:532
+ ip_queue_xmit+0x45/0x60 include/net/ip.h:236
+ __tcp_transmit_skb+0xdeb/0x1cd0 net/ipv4/tcp_output.c:1158
+ __tcp_send_ack+0x246/0x300 net/ipv4/tcp_output.c:3685
+ tcp_send_ack+0x34/0x40 net/ipv4/tcp_output.c:3691
+ tcp_cleanup_rbuf+0x130/0x360 net/ipv4/tcp.c:1575
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 1 PID: 7191 Comm: syz-fuzzer Not tainted 5.3.0+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Fixes: cc16921351d8 ("netfilter: conntrack: avoid same-timeout update")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Jozsef Kadlecsik <kadlec@netfilter.org>
+Cc: Florian Westphal <fw@strlen.de>
+Acked-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/netfilter/nf_conntrack_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1793,8 +1793,8 @@ void __nf_ct_refresh_acct(struct nf_conn
+ if (nf_ct_is_confirmed(ct))
+ extra_jiffies += nfct_time_stamp;
+
+- if (ct->timeout != extra_jiffies)
+- ct->timeout = extra_jiffies;
++ if (READ_ONCE(ct->timeout) != extra_jiffies)
++ WRITE_ONCE(ct->timeout, extra_jiffies);
+ acct:
+ if (do_acct)
+ nf_ct_acct_update(ct, ctinfo, skb->len);
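+
Pairing READ_ONCE() with WRITE_ONCE() marks both sides of the benign race:
the compare uses one untorn load and the update one untorn store, so a
concurrent refresher can only ever observe the old or the new timeout. A
simplified model; the macros below are userspace stand-ins for the kernel
macros, and ct_timeout models ct->timeout:

    /* Sketch only: marked read/write pairing. */
    #include <stdio.h>

    #define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

    static unsigned int ct_timeout;

    static void refresh_timeout(unsigned int extra_jiffies)
    {
            /* One untorn load for the compare, one untorn store for
             * the update; the compiler can neither tear nor fuse them. */
            if (READ_ONCE(ct_timeout) != extra_jiffies)
                    WRITE_ONCE(ct_timeout, extra_jiffies);
    }

    int main(void)
    {
            refresh_timeout(1000);
            printf("timeout: %u\n", ct_timeout);
            return 0;
    }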
diff --git a/patches.suse/netfilter-nf_flow_table-set-timeout-before-insertion.patch b/patches.suse/netfilter-nf_flow_table-set-timeout-before-insertion.patch
new file mode 100644
index 0000000000..f67e066bd2
--- /dev/null
+++ b/patches.suse/netfilter-nf_flow_table-set-timeout-before-insertion.patch
@@ -0,0 +1,56 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Mon, 14 Oct 2019 11:03:15 +0200
+Subject: netfilter: nf_flow_table: set timeout before insertion into hashes
+Patch-mainline: v5.4-rc6
+Git-commit: daf61b026f4686250e6afa619e6d7b49edc61df7
+References: bsc#1154353
+
+Otherwise, the garbage collector might remove an entry that is not fully
+set up yet.
+
+[570953.958293] RIP: 0010:memcmp+0x9/0x50
+[...]
+[570953.958567] flow_offload_hash_cmp+0x1e/0x30 [nf_flow_table]
+[570953.958585] flow_offload_lookup+0x8c/0x110 [nf_flow_table]
+[570953.958606] nf_flow_offload_ip_hook+0x135/0xb30 [nf_flow_table]
+[570953.958624] nf_flow_offload_inet_hook+0x35/0x37 [nf_flow_table_inet]
+[570953.958646] nf_hook_slow+0x3c/0xb0
+[570953.958664] __netif_receive_skb_core+0x90f/0xb10
+[570953.958678] ? ip_rcv_finish+0x82/0xa0
+[570953.958692] __netif_receive_skb_one_core+0x3b/0x80
+[570953.958711] __netif_receive_skb+0x18/0x60
+[570953.958727] netif_receive_skb_internal+0x45/0xf0
+[570953.958741] napi_gro_receive+0xcd/0xf0
+[570953.958764] ixgbe_clean_rx_irq+0x432/0xe00 [ixgbe]
+[570953.958782] ixgbe_poll+0x27b/0x700 [ixgbe]
+[570953.958796] net_rx_action+0x284/0x3c0
+[570953.958817] __do_softirq+0xcc/0x27c
+[570953.959464] irq_exit+0xe8/0x100
+[570953.960097] do_IRQ+0x59/0xe0
+[570953.960734] common_interrupt+0xf/0xf
+
+Fixes: 43c8f131184f ("netfilter: nf_flow_table: fix missing error check for rhashtable_insert_fast")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/netfilter/nf_flow_table_core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -201,6 +201,8 @@ int flow_offload_add(struct nf_flowtable
+ {
+ int err;
+
++ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
++
+ err = rhashtable_insert_fast(&flow_table->rhashtable,
+ &flow->tuplehash[0].node,
+ nf_flow_offload_rhash_params);
+@@ -217,7 +219,6 @@ int flow_offload_add(struct nf_flowtable
+ return err;
+ }
+
+- flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_add);
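+
Moving the timeout assignment before rhashtable_insert_fast() restores the
usual publish-after-init ordering: once the tuple hashes are linked, the
garbage collector may read flow->timeout, so the field has to be valid
first. A minimal sketch of the rule; the single-linked "hash" and constant
are illustrative, not the nf_flow_table types:

    /* Sketch only: initialise before making the object reachable. */
    #include <stdio.h>
    #include <time.h>

    #define FLOW_TIMEOUT 30

    struct flow {
            long timeout;
            struct flow *hash_next;     /* stand-in for the tuple nodes */
    };

    static struct flow *hash_head;      /* what the GC walks */

    static void flow_add(struct flow *flow)
    {
            /* Once the flow is linked, a concurrent GC may read
             * flow->timeout, so it must already be valid; the bug did
             * these two steps in the opposite order. */
            flow->timeout = (long)time(NULL) + FLOW_TIMEOUT;
            flow->hash_next = hash_head;
            hash_head = flow;
    }

    int main(void)
    {
            struct flow f;

            flow_add(&f);
            printf("expires at %ld\n", hash_head->timeout);
            return 0;
    }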
diff --git a/patches.suse/netfilter-nft_payload-fix-missing-check-for-matching.patch b/patches.suse/netfilter-nft_payload-fix-missing-check-for-matching.patch
new file mode 100644
index 0000000000..4940d95053
--- /dev/null
+++ b/patches.suse/netfilter-nft_payload-fix-missing-check-for-matching.patch
@@ -0,0 +1,133 @@
+From: wenxu <wenxu@ucloud.cn>
+Date: Thu, 24 Oct 2019 15:52:45 +0800
+Subject: netfilter: nft_payload: fix missing check for matching length in
+ offloads
+Patch-mainline: v5.4-rc6
+Git-commit: a69a85da458f79088c38a38db034a4d64d9c32c3
+References: bsc#1154353
+
+A payload offload rule should also check the length of the match.
+Moreover, check for unsupported link-layer fields:
+
+ nft --debug=netlink add rule firewall zones vlan id 100
+ ...
+ [ payload load 2b @ link header + 0 => reg 1 ]
+
+This loads 2 bytes from the link-layer header at offset 0.
+
+This also fixes the handling of unsupported raw payload matches.
+
+Fixes: 92ad6325cb89 ("netfilter: nf_tables: add hardware offload support")
+Signed-off-by: wenxu <wenxu@ucloud.cn>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/netfilter/nft_payload.c | 38 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 38 insertions(+)
+
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -161,13 +161,21 @@ static int nft_payload_offload_ll(struct
+
+ switch (priv->offset) {
+ case offsetof(struct ethhdr, h_source):
++ if (priv->len != ETH_ALEN)
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
+ src, ETH_ALEN, reg);
+ break;
+ case offsetof(struct ethhdr, h_dest):
++ if (priv->len != ETH_ALEN)
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
+ dst, ETH_ALEN, reg);
+ break;
++ default:
++ return -EOPNOTSUPP;
+ }
+
+ return 0;
+@@ -181,14 +189,23 @@ static int nft_payload_offload_ip(struct
+
+ switch (priv->offset) {
+ case offsetof(struct iphdr, saddr):
++ if (priv->len != sizeof(struct in_addr))
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
+ sizeof(struct in_addr), reg);
+ break;
+ case offsetof(struct iphdr, daddr):
++ if (priv->len != sizeof(struct in_addr))
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
+ sizeof(struct in_addr), reg);
+ break;
+ case offsetof(struct iphdr, protocol):
++ if (priv->len != sizeof(__u8))
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
+ sizeof(__u8), reg);
+ nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
+@@ -208,14 +225,23 @@ static int nft_payload_offload_ip6(struc
+
+ switch (priv->offset) {
+ case offsetof(struct ipv6hdr, saddr):
++ if (priv->len != sizeof(struct in6_addr))
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
+ sizeof(struct in6_addr), reg);
+ break;
+ case offsetof(struct ipv6hdr, daddr):
++ if (priv->len != sizeof(struct in6_addr))
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
+ sizeof(struct in6_addr), reg);
+ break;
+ case offsetof(struct ipv6hdr, nexthdr):
++ if (priv->len != sizeof(__u8))
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
+ sizeof(__u8), reg);
+ nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
+@@ -255,10 +281,16 @@ static int nft_payload_offload_tcp(struc
+
+ switch (priv->offset) {
+ case offsetof(struct tcphdr, source):
++ if (priv->len != sizeof(__be16))
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
+ sizeof(__be16), reg);
+ break;
+ case offsetof(struct tcphdr, dest):
++ if (priv->len != sizeof(__be16))
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
+ sizeof(__be16), reg);
+ break;
+@@ -277,10 +309,16 @@ static int nft_payload_offload_udp(struc
+
+ switch (priv->offset) {
+ case offsetof(struct udphdr, source):
++ if (priv->len != sizeof(__be16))
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
+ sizeof(__be16), reg);
+ break;
+ case offsetof(struct udphdr, dest):
++ if (priv->len != sizeof(__be16))
++ return -EOPNOTSUPP;
++
+ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
+ sizeof(__be16), reg);
+ break;
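+
Each switch arm now refuses the offload unless the match covers the whole
field, since the flow-dissector keys used for hardware offload describe
full fields only; partial matches return -EOPNOTSUPP and stay in software.
A small sketch of the check; offload_ipv4_saddr() is a hypothetical helper
mirroring the shape of the nft_payload arms, not the real code:

    /* Sketch only: whole-field length validation before offload. */
    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct payload_match {
            size_t offset;
            size_t len;
    };

    static int offload_ipv4_saddr(const struct payload_match *m)
    {
            /* A partial match (say 2 of the 4 address bytes) cannot be
             * expressed by the dissector key, so reject it: */
            if (m->len != 4)            /* sizeof(struct in_addr) */
                    return -EOPNOTSUPP;
            return 0;                   /* would program the rule here */
    }

    int main(void)
    {
            struct payload_match m = { .offset = 12, .len = 2 };

            printf("offload: %d\n", offload_ipv4_saddr(&m));
            return 0;
    }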
diff --git a/patches.suse/netns-fix-GFP-flags-in-rtnl_net_notifyid.patch b/patches.suse/netns-fix-GFP-flags-in-rtnl_net_notifyid.patch
new file mode 100644
index 0000000000..940f24fbce
--- /dev/null
+++ b/patches.suse/netns-fix-GFP-flags-in-rtnl_net_notifyid.patch
@@ -0,0 +1,284 @@
+From: Guillaume Nault <gnault@redhat.com>
+Date: Wed, 23 Oct 2019 18:39:04 +0200
+Subject: netns: fix GFP flags in rtnl_net_notifyid()
+Patch-mainline: v5.4-rc6
+Git-commit: d4e4fdf9e4a27c87edb79b1478955075be141f67
+References: bsc#1154353
+
+In rtnl_net_notifyid(), we certainly can't pass a null GFP flag to
+rtnl_notify(). A GFP_KERNEL flag would be fine in most circumstances,
+but there are a few paths calling rtnl_net_notifyid() from atomic
+context or from RCU critical sections. The latter also precludes the use
+of gfp_any(), as it wouldn't detect the RCU case. The nlmsg_new()
+call is wrong too, as it uses GFP_KERNEL unconditionally.
+
+Therefore, we need to pass the GFP flags as a parameter and propagate
+them through the function calls until the proper flags can be determined.
+
+In most cases, GFP_KERNEL is fine. The exceptions are:
+ * openvswitch: ovs_vport_cmd_get() and ovs_vport_cmd_dump()
+ indirectly call rtnl_net_notifyid() from RCU critical section,
+
+ * rtnetlink: rtmsg_ifinfo_build_skb() already receives GFP flags as
+ parameter.
+
+Also, in ovs_vport_cmd_build_info(), let's change the GFP flags used
+by nlmsg_new(). The function is allowed to sleep, so it is better to make the
+flags consistent with the ones used in the following
+ovs_vport_cmd_fill_info() call.
+
+Found by code inspection.
+
+Fixes: 9a9634545c70 ("netns: notify netns id events")
+Signed-off-by: Guillaume Nault <gnault@redhat.com>
+Acked-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Acked-by: Pravin B Shelar <pshelar@ovn.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/net_namespace.h | 2 +-
+ net/core/dev.c | 2 +-
+ net/core/net_namespace.c | 17 +++++++++--------
+ net/core/rtnetlink.c | 14 +++++++-------
+ net/openvswitch/datapath.c | 20 +++++++++++---------
+ 5 files changed, 29 insertions(+), 26 deletions(-)
+
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -339,7 +339,7 @@ static inline struct net *read_pnet(cons
+ #define __net_initconst __initconst
+ #endif
+
+-int peernet2id_alloc(struct net *net, struct net *peer);
++int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
+ int peernet2id(struct net *net, struct net *peer);
+ bool peernet_has_id(struct net *net, struct net *peer);
+ struct net *get_net_ns_by_id(struct net *net, int id);
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -9770,7 +9770,7 @@ int dev_change_net_namespace(struct net_
+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+ rcu_barrier();
+
+- new_nsid = peernet2id_alloc(dev_net(dev), net);
++ new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
+ /* If there is an ifindex conflict assign a new one */
+ if (__dev_get_by_index(net, dev->ifindex))
+ new_ifindex = dev_new_index(net);
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -246,11 +246,11 @@ static int __peernet2id(struct net *net,
+ }
+
+ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
+- struct nlmsghdr *nlh);
++ struct nlmsghdr *nlh, gfp_t gfp);
+ /* This function returns the id of a peer netns. If no id is assigned, one will
+ * be allocated and returned.
+ */
+-int peernet2id_alloc(struct net *net, struct net *peer)
++int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
+ {
+ bool alloc = false, alive = false;
+ int id;
+@@ -269,7 +269,7 @@ int peernet2id_alloc(struct net *net, st
+ id = __peernet2id_alloc(net, peer, &alloc);
+ spin_unlock_bh(&net->nsid_lock);
+ if (alloc && id >= 0)
+- rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL);
++ rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
+ if (alive)
+ put_net(peer);
+ return id;
+@@ -534,7 +534,8 @@ static void unhash_nsid(struct net *net,
+ idr_remove(&tmp->netns_ids, id);
+ spin_unlock_bh(&tmp->nsid_lock);
+ if (id >= 0)
+- rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL);
++ rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
++ GFP_KERNEL);
+ if (tmp == last)
+ break;
+ }
+@@ -767,7 +768,7 @@ static int rtnl_net_newid(struct sk_buff
+ spin_unlock_bh(&net->nsid_lock);
+ if (err >= 0) {
+ rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
+- nlh);
++ nlh, GFP_KERNEL);
+ err = 0;
+ } else if (err == -ENOSPC && nsid >= 0) {
+ err = -EEXIST;
+@@ -1055,7 +1056,7 @@ end:
+ }
+
+ static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
+- struct nlmsghdr *nlh)
++ struct nlmsghdr *nlh, gfp_t gfp)
+ {
+ struct net_fill_args fillargs = {
+ .portid = portid,
+@@ -1066,7 +1067,7 @@ static void rtnl_net_notifyid(struct net
+ struct sk_buff *msg;
+ int err = -ENOMEM;
+
+- msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
++ msg = nlmsg_new(rtnl_net_get_size(), gfp);
+ if (!msg)
+ goto out;
+
+@@ -1074,7 +1075,7 @@ static void rtnl_net_notifyid(struct net
+ if (err < 0)
+ goto err_out;
+
+- rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0);
++ rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
+ return;
+
+ err_out:
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1523,7 +1523,7 @@ static noinline_for_stack int nla_put_if
+
+ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
+ const struct net_device *dev,
+- struct net *src_net)
++ struct net *src_net, gfp_t gfp)
+ {
+ bool put_iflink = false;
+
+@@ -1531,7 +1531,7 @@ static int rtnl_fill_link_netnsid(struct
+ struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
+
+ if (!net_eq(dev_net(dev), link_net)) {
+- int id = peernet2id_alloc(src_net, link_net);
++ int id = peernet2id_alloc(src_net, link_net, gfp);
+
+ if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
+ return -EMSGSIZE;
+@@ -1589,7 +1589,7 @@ static int rtnl_fill_ifinfo(struct sk_bu
+ int type, u32 pid, u32 seq, u32 change,
+ unsigned int flags, u32 ext_filter_mask,
+ u32 event, int *new_nsid, int new_ifindex,
+- int tgt_netnsid)
++ int tgt_netnsid, gfp_t gfp)
+ {
+ struct ifinfomsg *ifm;
+ struct nlmsghdr *nlh;
+@@ -1681,7 +1681,7 @@ static int rtnl_fill_ifinfo(struct sk_bu
+ goto nla_put_failure;
+ }
+
+- if (rtnl_fill_link_netnsid(skb, dev, src_net))
++ if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
+ goto nla_put_failure;
+
+ if (new_nsid &&
+@@ -2001,7 +2001,7 @@ walk_entries:
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq, 0, flags,
+ ext_filter_mask, 0, NULL, 0,
+- netnsid);
++ netnsid, GFP_KERNEL);
+
+ if (err < 0) {
+ if (likely(skb->len))
+@@ -3360,7 +3360,7 @@ static int rtnl_getlink(struct sk_buff *
+ err = rtnl_fill_ifinfo(nskb, dev, net,
+ RTM_NEWLINK, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq, 0, 0, ext_filter_mask,
+- 0, NULL, 0, netnsid);
++ 0, NULL, 0, netnsid, GFP_KERNEL);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size */
+ WARN_ON(err == -EMSGSIZE);
+@@ -3472,7 +3472,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(i
+
+ err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
+ type, 0, 0, change, 0, 0, event,
+- new_nsid, new_ifindex, -1);
++ new_nsid, new_ifindex, -1, flags);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1850,7 +1850,7 @@ static struct genl_family dp_datapath_ge
+ /* Called with ovs_mutex or RCU read lock. */
+ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+ struct net *net, u32 portid, u32 seq,
+- u32 flags, u8 cmd)
++ u32 flags, u8 cmd, gfp_t gfp)
+ {
+ struct ovs_header *ovs_header;
+ struct ovs_vport_stats vport_stats;
+@@ -1871,7 +1871,7 @@ static int ovs_vport_cmd_fill_info(struc
+ goto nla_put_failure;
+
+ if (!net_eq(net, dev_net(vport->dev))) {
+- int id = peernet2id_alloc(net, dev_net(vport->dev));
++ int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
+
+ if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
+ goto nla_put_failure;
+@@ -1912,11 +1912,12 @@ struct sk_buff *ovs_vport_cmd_build_info
+ struct sk_buff *skb;
+ int retval;
+
+- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
++ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+- retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
++ retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
++ GFP_KERNEL);
+ BUG_ON(retval < 0);
+
+ return skb;
+@@ -2058,7 +2059,7 @@ restart:
+
+ err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
+ info->snd_portid, info->snd_seq, 0,
+- OVS_VPORT_CMD_NEW);
++ OVS_VPORT_CMD_NEW, GFP_KERNEL);
+
+ new_headroom = netdev_get_fwd_headroom(vport->dev);
+
+@@ -2119,7 +2120,7 @@ static int ovs_vport_cmd_set(struct sk_b
+
+ err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
+ info->snd_portid, info->snd_seq, 0,
+- OVS_VPORT_CMD_SET);
++ OVS_VPORT_CMD_SET, GFP_KERNEL);
+ BUG_ON(err < 0);
+
+ ovs_unlock();
+@@ -2159,7 +2160,7 @@ static int ovs_vport_cmd_del(struct sk_b
+
+ err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
+ info->snd_portid, info->snd_seq, 0,
+- OVS_VPORT_CMD_DEL);
++ OVS_VPORT_CMD_DEL, GFP_KERNEL);
+ BUG_ON(err < 0);
+
+ /* the vport deletion may trigger dp headroom update */
+@@ -2206,7 +2207,7 @@ static int ovs_vport_cmd_get(struct sk_b
+ goto exit_unlock_free;
+ err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
+ info->snd_portid, info->snd_seq, 0,
+- OVS_VPORT_CMD_GET);
++ OVS_VPORT_CMD_GET, GFP_ATOMIC);
+ BUG_ON(err < 0);
+ rcu_read_unlock();
+
+@@ -2242,7 +2243,8 @@ static int ovs_vport_cmd_dump(struct sk_
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI,
+- OVS_VPORT_CMD_GET) < 0)
++ OVS_VPORT_CMD_GET,
++ GFP_ATOMIC) < 0)
+ goto out;
+
+ j++;
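+
The pattern throughout is threading a gfp_t from the outermost caller,
which knows whether it may sleep, down to every allocation on the path:
the RCU read-side callers in openvswitch pass GFP_ATOMIC, everything else
GFP_KERNEL. A toy model of the propagation; gfp_t and both functions below
are userspace stand-ins, not the net_namespace.c code:

    /* Sketch only: the allocation-context flag follows the call chain. */
    #include <stdio.h>

    typedef enum { GFP_KERNEL, GFP_ATOMIC } gfp_t;

    static void rtnl_notifyid(int id, gfp_t gfp)
    {
            /* Every allocation on this path must honour the caller's
             * context: GFP_ATOMIC under rcu_read_lock(), GFP_KERNEL
             * where sleeping is allowed. */
            printf("notify id=%d with %s\n", id,
                   gfp == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL");
    }

    static int peernet2id_alloc(int peer, gfp_t gfp)
    {
            int id = peer + 1;          /* pretend id allocation */

            rtnl_notifyid(id, gfp);     /* the flag propagates down */
            return id;
    }

    int main(void)
    {
            peernet2id_alloc(41, GFP_KERNEL);  /* process context */
            peernet2id_alloc(41, GFP_ATOMIC);  /* RCU read-side caller */
            return 0;
    }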
diff --git a/patches.suse/netns-fix-NLM_F_ECHO-mechanism-for-RTM_NEWNSID.patch b/patches.suse/netns-fix-NLM_F_ECHO-mechanism-for-RTM_NEWNSID.patch
new file mode 100644
index 0000000000..defe71e2a1
--- /dev/null
+++ b/patches.suse/netns-fix-NLM_F_ECHO-mechanism-for-RTM_NEWNSID.patch
@@ -0,0 +1,85 @@
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Wed, 9 Oct 2019 11:19:10 +0200
+Subject: netns: fix NLM_F_ECHO mechanism for RTM_NEWNSID
+Patch-mainline: v5.4-rc4
+Git-commit: 993e4c929a073595d22c85f59082f0c387e31c21
+References: bsc#1154353
+
+The NLM_F_ECHO flag asks the kernel to echo back to the requesting user
+the message that was notified to all listeners.
+This was not the case with the RTM_NEWNSID command; let's fix this.
+
+Fixes: 0c7aecd4bde4 ("netns: add rtnl cmd to add and get peer netns ids")
+Reported-by: Guillaume Nault <gnault@redhat.com>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Acked-by: Guillaume Nault <gnault@redhat.com>
+Tested-by: Guillaume Nault <gnault@redhat.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/core/net_namespace.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -245,7 +245,8 @@ static int __peernet2id(struct net *net,
+ return __peernet2id_alloc(net, peer, &no);
+ }
+
+-static void rtnl_net_notifyid(struct net *net, int cmd, int id);
++static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
++ struct nlmsghdr *nlh);
+ /* This function returns the id of a peer netns. If no id is assigned, one will
+ * be allocated and returned.
+ */
+@@ -268,7 +269,7 @@ int peernet2id_alloc(struct net *net, st
+ id = __peernet2id_alloc(net, peer, &alloc);
+ spin_unlock_bh(&net->nsid_lock);
+ if (alloc && id >= 0)
+- rtnl_net_notifyid(net, RTM_NEWNSID, id);
++ rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL);
+ if (alive)
+ put_net(peer);
+ return id;
+@@ -532,7 +533,7 @@ static void unhash_nsid(struct net *net,
+ idr_remove(&tmp->netns_ids, id);
+ spin_unlock_bh(&tmp->nsid_lock);
+ if (id >= 0)
+- rtnl_net_notifyid(tmp, RTM_DELNSID, id);
++ rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL);
+ if (tmp == last)
+ break;
+ }
+@@ -764,7 +765,8 @@ static int rtnl_net_newid(struct sk_buff
+ err = alloc_netid(net, peer, nsid);
+ spin_unlock_bh(&net->nsid_lock);
+ if (err >= 0) {
+- rtnl_net_notifyid(net, RTM_NEWNSID, err);
++ rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
++ nlh);
+ err = 0;
+ } else if (err == -ENOSPC && nsid >= 0) {
+ err = -EEXIST;
+@@ -1051,9 +1053,12 @@ end:
+ return err < 0 ? err : skb->len;
+ }
+
+-static void rtnl_net_notifyid(struct net *net, int cmd, int id)
++static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
++ struct nlmsghdr *nlh)
+ {
+ struct net_fill_args fillargs = {
++ .portid = portid,
++ .seq = nlh ? nlh->nlmsg_seq : 0,
+ .cmd = cmd,
+ .nsid = id,
+ };
+@@ -1068,7 +1073,7 @@ static void rtnl_net_notifyid(struct net
+ if (err < 0)
+ goto err_out;
+
+- rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
++ rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0);
+ return;
+
+ err_out:
diff --git a/patches.suse/qed-Optimize-execution-time-for-nvm-attributes-confi.patch b/patches.suse/qed-Optimize-execution-time-for-nvm-attributes-confi.patch
new file mode 100644
index 0000000000..3eef7a9330
--- /dev/null
+++ b/patches.suse/qed-Optimize-execution-time-for-nvm-attributes-confi.patch
@@ -0,0 +1,85 @@
+From: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Date: Wed, 30 Oct 2019 01:39:58 -0700
+Subject: qed: Optimize execution time for nvm attributes configuration.
+Patch-mainline: v5.4-rc6
+Git-commit: c63b0968946b2d72178a92793bcc9439e19b385f
+References: jsc#SLE-8401
+
+The current implementation of nvm_attr configuration instructs the
+management FW to load/unload the nvm-cfg image for each user-provided
+attribute in the input file. This consumes a lot of cycles even for a
+few tens of attributes.
+This patch updates the implementation to perform a load/commit of the
+config for every 50 attributes. After loading the nvm-image, the MFW
+expects the config to be committed within a predefined time (5 sec),
+hence it's not possible to write a large number of attributes in a
+single load/commit window. Hence the commits are performed in chunks.
+
+Fixes: 0dabbe1bb3a4 ("qed: Add driver API for flashing the config attributes.")
+Signed-off-by: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Signed-off-by: Ariel Elior <aelior@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/qlogic/qed/qed_main.c | 27 +++++++++++++++++++++------
+ 1 file changed, 21 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -67,10 +67,9 @@
+ #define QED_ROCE_QPS (8192)
+ #define QED_ROCE_DPIS (8)
+ #define QED_RDMA_SRQS QED_ROCE_QPS
+-#define QED_NVM_CFG_SET_FLAGS 0xE
+-#define QED_NVM_CFG_SET_PF_FLAGS 0x1E
+ #define QED_NVM_CFG_GET_FLAGS 0xA
+ #define QED_NVM_CFG_GET_PF_FLAGS 0x1A
++#define QED_NVM_CFG_MAX_ATTRS 50
+
+ static char version[] =
+ "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
+@@ -2255,6 +2254,7 @@ static int qed_nvm_flash_cfg_write(struc
+ {
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 entity_id, len, buf[32];
++ bool need_nvm_init = true;
+ struct qed_ptt *ptt;
+ u16 cfg_id, count;
+ int rc = 0, i;
+@@ -2271,8 +2271,10 @@ static int qed_nvm_flash_cfg_write(struc
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read config ids: num_attrs = %0d\n", count);
+- /* NVM CFG ID attributes */
+- for (i = 0; i < count; i++) {
++ /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
++ * arithmetic operations in the implementation.
++ */
++ for (i = 1; i <= count; i++) {
+ cfg_id = *((u16 *)*data);
+ *data += 2;
+ entity_id = **data;
+@@ -2282,8 +2284,21 @@ static int qed_nvm_flash_cfg_write(struc
+ memcpy(buf, *data, len);
+ *data += len;
+
+- flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS :
+- QED_NVM_CFG_SET_FLAGS;
++ flags = 0;
++ if (need_nvm_init) {
++ flags |= QED_NVM_CFG_OPTION_INIT;
++ need_nvm_init = false;
++ }
++
++ /* Commit to flash and free the resources */
++ if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
++ flags |= QED_NVM_CFG_OPTION_COMMIT |
++ QED_NVM_CFG_OPTION_FREE;
++ need_nvm_init = true;
++ }
++
++ if (entity_id)
++ flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "cfg_id = %d entity = %d len = %d\n", cfg_id,
diff --git a/patches.suse/qed-fix-spelling-mistake-queuess-queues.patch b/patches.suse/qed-fix-spelling-mistake-queuess-queues.patch
new file mode 100644
index 0000000000..063853f863
--- /dev/null
+++ b/patches.suse/qed-fix-spelling-mistake-queuess-queues.patch
@@ -0,0 +1,27 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Wed, 30 Oct 2019 08:59:22 +0100
+Subject: qed: fix spelling mistake "queuess" -> "queues"
+Patch-mainline: v5.4-rc6
+Git-commit: dc99da4f31ce48d15684bde7916104064520025c
+References: jsc#SLE-8401
+
+There is a spelling mistake in a DP_NOTICE message. Fix it.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/qlogic/qed/qed_sriov.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+@@ -2005,7 +2005,7 @@ static void qed_iov_vf_mbx_stop_vport(st
+ (qed_iov_validate_active_txq(p_hwfn, vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn,
+- "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
++ "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
diff --git a/patches.suse/rxrpc-Fix-call-ref-leak.patch b/patches.suse/rxrpc-Fix-call-ref-leak.patch
new file mode 100644
index 0000000000..096316be67
--- /dev/null
+++ b/patches.suse/rxrpc-Fix-call-ref-leak.patch
@@ -0,0 +1,44 @@
+From: David Howells <dhowells@redhat.com>
+Date: Mon, 7 Oct 2019 10:58:28 +0100
+Subject: rxrpc: Fix call ref leak
+Patch-mainline: v5.4-rc4
+Git-commit: c48fc11b69e95007109206311b0187a3090591f3
+References: bsc#1154353
+
+When sendmsg() finds an existing call to continue with and the call is
+in an inappropriate state, it doesn't release the ref it just took on
+that call before returning an error.
+
+This causes the following symptom to show up with kasan:
+
+ BUG: KASAN: use-after-free in rxrpc_send_keepalive+0x8a2/0x940
+ net/rxrpc/output.c:635
+ Read of size 8 at addr ffff888064219698 by task kworker/0:3/11077
+
+where line 635 is:
+
+ whdr.epoch = htonl(peer->local->rxnet->epoch);
+
+The local endpoint (which cannot be pinned by the call) has been released,
+but not the peer (which is pinned by the call).
+
+Fix this by releasing the call in the error path.
+
+Fixes: 37411cad633f ("rxrpc: Fix potential NULL-pointer exception")
+Reported-by: syzbot+d850c266e3df14da1d31@syzkaller.appspotmail.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/rxrpc/sendmsg.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -661,6 +661,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *
+ case RXRPC_CALL_SERVER_PREALLOC:
+ case RXRPC_CALL_SERVER_SECURING:
+ case RXRPC_CALL_SERVER_ACCEPTING:
++ rxrpc_put_call(call, rxrpc_call_put);
+ ret = -EBUSY;
+ goto error_release_sock;
+ default:
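+
The one-line fix restores the invariant that every exit path taken after
acquiring a reference must drop it; the EBUSY branch returned with the ref
still held, leaking the call. A minimal refcounting model of that rule;
the types and functions are illustrative, not the rxrpc code:

    /* Sketch only: balanced get/put on every path. */
    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct call {
            atomic_int usage;
            int state;
    };

    static void put_call(struct call *c)
    {
            if (atomic_fetch_sub(&c->usage, 1) == 1)
                    free(c);            /* last ref frees the object */
    }

    static int do_sendmsg(struct call *c)
    {
            atomic_fetch_add(&c->usage, 1);     /* take a ref */

            if (c->state != 0) {                /* inappropriate state */
                    put_call(c);                /* the put the fix adds */
                    return -EBUSY;
            }
            /* ... would transmit here ... */
            put_call(c);
            return 0;
    }

    int main(void)
    {
            struct call *c = malloc(sizeof(*c));

            atomic_init(&c->usage, 1);
            c->state = 1;
            printf("ret = %d\n", do_sendmsg(c));
            put_call(c);
            return 0;
    }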
diff --git a/patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-call-re.patch b/patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-call-re.patch
new file mode 100644
index 0000000000..39e64a4045
--- /dev/null
+++ b/patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-call-re.patch
@@ -0,0 +1,157 @@
+From: David Howells <dhowells@redhat.com>
+Date: Mon, 7 Oct 2019 10:58:29 +0100
+Subject: rxrpc: Fix trace-after-put looking at the put call record
+Patch-mainline: v5.4-rc4
+Git-commit: 48c9e0ec7cbbb7370448f859ccc8e3b7eb69e755
+References: bsc#1154353
+
+rxrpc_put_call() calls trace_rxrpc_call() after it has done the decrement
+of the refcount - which looks at the debug_id in the call record. But
+unless the refcount was reduced to zero, we no longer have the right to
+look in the record and, indeed, it may be deleted by some other thread.
+
+Fix this by getting the debug_id out before decrementing the refcount and
+then passing that into the tracepoint.
+
+Fixes: e34d4234b0b7 ("rxrpc: Trace rxrpc_call usage")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/trace/events/rxrpc.h | 6 +++---
+ net/rxrpc/call_accept.c | 2 +-
+ net/rxrpc/call_object.c | 28 +++++++++++++++++-----------
+ 3 files changed, 21 insertions(+), 15 deletions(-)
+
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -606,10 +606,10 @@ TRACE_EVENT(rxrpc_client,
+ );
+
+ TRACE_EVENT(rxrpc_call,
+- TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
++ TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op,
+ int usage, const void *where, const void *aux),
+
+- TP_ARGS(call, op, usage, where, aux),
++ TP_ARGS(call_debug_id, op, usage, where, aux),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+@@ -620,7 +620,7 @@ TRACE_EVENT(rxrpc_call,
+ ),
+
+ TP_fast_assign(
+- __entry->call = call->debug_id;
++ __entry->call = call_debug_id;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -97,7 +97,7 @@ static int rxrpc_service_prealloc_one(st
+ call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
+ call->state = RXRPC_CALL_SERVER_PREALLOC;
+
+- trace_rxrpc_call(call, rxrpc_call_new_service,
++ trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
+ atomic_read(&call->usage),
+ here, (const void *)user_call_ID);
+
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -240,7 +240,8 @@ struct rxrpc_call *rxrpc_new_client_call
+ if (p->intr)
+ __set_bit(RXRPC_CALL_IS_INTR, &call->flags);
+ call->tx_total_len = p->tx_total_len;
+- trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
++ trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
++ atomic_read(&call->usage),
+ here, (const void *)p->user_call_ID);
+
+ /* We need to protect a partially set up call against the user as we
+@@ -290,8 +291,8 @@ struct rxrpc_call *rxrpc_new_client_call
+ if (ret < 0)
+ goto error;
+
+- trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
+- here, NULL);
++ trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
++ atomic_read(&call->usage), here, NULL);
+
+ rxrpc_start_call_timer(call);
+
+@@ -313,8 +314,8 @@ error_dup_user_ID:
+ error:
+ __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+ RX_CALL_DEAD, ret);
+- trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
+- here, ERR_PTR(ret));
++ trace_rxrpc_call(call->debug_id, rxrpc_call_error,
++ atomic_read(&call->usage), here, ERR_PTR(ret));
+ rxrpc_release_call(rx, call);
+ mutex_unlock(&call->user_mutex);
+ rxrpc_put_call(call, rxrpc_call_put);
+@@ -376,7 +377,8 @@ bool rxrpc_queue_call(struct rxrpc_call
+ if (n == 0)
+ return false;
+ if (rxrpc_queue_work(&call->processor))
+- trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
++ trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
++ here, NULL);
+ else
+ rxrpc_put_call(call, rxrpc_call_put_noqueue);
+ return true;
+@@ -391,7 +393,8 @@ bool __rxrpc_queue_call(struct rxrpc_cal
+ int n = atomic_read(&call->usage);
+ ASSERTCMP(n, >=, 1);
+ if (rxrpc_queue_work(&call->processor))
+- trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
++ trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
++ here, NULL);
+ else
+ rxrpc_put_call(call, rxrpc_call_put_noqueue);
+ return true;
+@@ -406,7 +409,8 @@ void rxrpc_see_call(struct rxrpc_call *c
+ if (call) {
+ int n = atomic_read(&call->usage);
+
+- trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
++ trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
++ here, NULL);
+ }
+ }
+
+@@ -418,7 +422,7 @@ void rxrpc_get_call(struct rxrpc_call *c
+ const void *here = __builtin_return_address(0);
+ int n = atomic_inc_return(&call->usage);
+
+- trace_rxrpc_call(call, op, n, here, NULL);
++ trace_rxrpc_call(call->debug_id, op, n, here, NULL);
+ }
+
+ /*
+@@ -445,7 +449,8 @@ void rxrpc_release_call(struct rxrpc_soc
+
+ _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
+
+- trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
++ trace_rxrpc_call(call->debug_id, rxrpc_call_release,
++ atomic_read(&call->usage),
+ here, (const void *)call->flags);
+
+ ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
+@@ -532,12 +537,13 @@ void rxrpc_put_call(struct rxrpc_call *c
+ {
+ struct rxrpc_net *rxnet = call->rxnet;
+ const void *here = __builtin_return_address(0);
++ unsigned int debug_id = call->debug_id;
+ int n;
+
+ ASSERT(call != NULL);
+
+ n = atomic_dec_return(&call->usage);
+- trace_rxrpc_call(call, op, n, here, NULL);
++ trace_rxrpc_call(debug_id, op, n, here, NULL);
+ ASSERTCMP(n, >=, 0);
+ if (n == 0) {
+ _debug("call %d dead", call->debug_id);
diff --git a/patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-connect.patch b/patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-connect.patch
new file mode 100644
index 0000000000..fb2402cbb9
--- /dev/null
+++ b/patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-connect.patch
@@ -0,0 +1,159 @@
+From: David Howells <dhowells@redhat.com>
+Date: Mon, 7 Oct 2019 10:58:29 +0100
+Subject: rxrpc: Fix trace-after-put looking at the put connection record
+Patch-mainline: v5.4-rc4
+Git-commit: 4c1295dccc0afe0905b6ca4c62ade7f2406f2cfb
+References: bsc#1154353
+
+rxrpc_put_*conn() calls trace_rxrpc_conn() after they have done the
+decrement of the refcount - which looks at the debug_id in the connection
+record. But unless the refcount was reduced to zero, we no longer have the
+right to look in the record and, indeed, it may be deleted by some other
+thread.
+
+Fix this by getting the debug_id out before decrementing the refcount and
+then passing that into the tracepoint.
+
+Fixes: 363deeab6d0f ("rxrpc: Add connection tracepoint and client conn state tracepoint")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/trace/events/rxrpc.h | 6 +++---
+ net/rxrpc/call_accept.c | 2 +-
+ net/rxrpc/conn_client.c | 6 ++++--
+ net/rxrpc/conn_object.c | 13 +++++++------
+ net/rxrpc/conn_service.c | 2 +-
+ 5 files changed, 16 insertions(+), 13 deletions(-)
+
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -546,10 +546,10 @@ TRACE_EVENT(rxrpc_peer,
+ );
+
+ TRACE_EVENT(rxrpc_conn,
+- TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
++ TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op,
+ int usage, const void *where),
+
+- TP_ARGS(conn, op, usage, where),
++ TP_ARGS(conn_debug_id, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn )
+@@ -559,7 +559,7 @@ TRACE_EVENT(rxrpc_conn,
+ ),
+
+ TP_fast_assign(
+- __entry->conn = conn->debug_id;
++ __entry->conn = conn_debug_id;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -84,7 +84,7 @@ static int rxrpc_service_prealloc_one(st
+ smp_store_release(&b->conn_backlog_head,
+ (head + 1) & (size - 1));
+
+- trace_rxrpc_conn(conn, rxrpc_conn_new_service,
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
+ atomic_read(&conn->usage), here);
+ }
+
+--- a/net/rxrpc/conn_client.c
++++ b/net/rxrpc/conn_client.c
+@@ -212,7 +212,8 @@ rxrpc_alloc_client_connection(struct rxr
+ rxrpc_get_local(conn->params.local);
+ key_get(conn->params.key);
+
+- trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
++ atomic_read(&conn->usage),
+ __builtin_return_address(0));
+ trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
+ _leave(" = %p", conn);
+@@ -985,11 +986,12 @@ rxrpc_put_one_client_conn(struct rxrpc_c
+ void rxrpc_put_client_conn(struct rxrpc_connection *conn)
+ {
+ const void *here = __builtin_return_address(0);
++ unsigned int debug_id = conn->debug_id;
+ int n;
+
+ do {
+ n = atomic_dec_return(&conn->usage);
+- trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
++ trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
+ if (n > 0)
+ return;
+ ASSERTCMP(n, >=, 0);
+--- a/net/rxrpc/conn_object.c
++++ b/net/rxrpc/conn_object.c
+@@ -269,7 +269,7 @@ bool rxrpc_queue_conn(struct rxrpc_conne
+ if (n == 0)
+ return false;
+ if (rxrpc_queue_work(&conn->processor))
+- trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here);
+ else
+ rxrpc_put_connection(conn);
+ return true;
+@@ -284,7 +284,7 @@ void rxrpc_see_connection(struct rxrpc_c
+ if (conn) {
+ int n = atomic_read(&conn->usage);
+
+- trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here);
+ }
+ }
+
+@@ -296,7 +296,7 @@ void rxrpc_get_connection(struct rxrpc_c
+ const void *here = __builtin_return_address(0);
+ int n = atomic_inc_return(&conn->usage);
+
+- trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here);
+ }
+
+ /*
+@@ -310,7 +310,7 @@ rxrpc_get_connection_maybe(struct rxrpc_
+ if (conn) {
+ int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
+ if (n > 0)
+- trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here);
+ else
+ conn = NULL;
+ }
+@@ -333,10 +333,11 @@ static void rxrpc_set_service_reap_timer
+ void rxrpc_put_service_conn(struct rxrpc_connection *conn)
+ {
+ const void *here = __builtin_return_address(0);
++ unsigned int debug_id = conn->debug_id;
+ int n;
+
+ n = atomic_dec_return(&conn->usage);
+- trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
++ trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here);
+ ASSERTCMP(n, >=, 0);
+ if (n == 1)
+ rxrpc_set_service_reap_timer(conn->params.local->rxnet,
+@@ -420,7 +421,7 @@ void rxrpc_service_connection_reaper(str
+ */
+ if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
+ continue;
+- trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL);
+
+ if (rxrpc_conn_is_client(conn))
+ BUG();
+--- a/net/rxrpc/conn_service.c
++++ b/net/rxrpc/conn_service.c
+@@ -134,7 +134,7 @@ struct rxrpc_connection *rxrpc_prealloc_
+ list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
+ write_unlock(&rxnet->conn_lock);
+
+- trace_rxrpc_conn(conn, rxrpc_conn_new_service,
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
+ atomic_read(&conn->usage),
+ __builtin_return_address(0));
+ }
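
Editor's note (not part of the patch): the pattern above - capture any
field you still need before dropping the reference - generalizes beyond
rxrpc. A minimal, hypothetical userspace sketch in plain C (model types,
not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            atomic_int usage;
            unsigned int debug_id;
    };

    /* Capture debug_id BEFORE the decrement: once usage may have hit
     * zero, another thread could already have freed *o, so reading
     * o->debug_id afterwards would be a use-after-free. */
    static void put_obj(struct obj *o)
    {
            unsigned int debug_id = o->debug_id;    /* ref still held */
            int n = atomic_fetch_sub(&o->usage, 1) - 1;

            printf("put obj %u, usage now %d\n", debug_id, n);
            if (n == 0)
                    free(o);        /* last reference: safe to free */
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));

            if (!o)
                    return 1;
            atomic_init(&o->usage, 1);
            o->debug_id = 42;
            put_obj(o);
            return 0;
    }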
diff --git a/patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-peer-re.patch b/patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-peer-re.patch
new file mode 100644
index 0000000000..0720423310
--- /dev/null
+++ b/patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-peer-re.patch
@@ -0,0 +1,104 @@
+From: David Howells <dhowells@redhat.com>
+Date: Mon, 7 Oct 2019 10:58:29 +0100
+Subject: rxrpc: Fix trace-after-put looking at the put peer record
+Patch-mainline: v5.4-rc4
+Git-commit: 55f6c98e3674ce16038a1949c3f9ca5a9a99f289
+References: bsc#1154353
+
+rxrpc_put_peer() calls trace_rxrpc_peer() after it has done the decrement
+of the refcount - which looks at the debug_id in the peer record. But
+unless the refcount was reduced to zero, we no longer have the right to
+look in the record and, indeed, it may be deleted by some other thread.
+
+Fix this by getting the debug_id out before decrementing the refcount and
+then passing that into the tracepoint.
+
+This can cause the following symptoms:
+
+ BUG: KASAN: use-after-free in __rxrpc_put_peer net/rxrpc/peer_object.c:411
+ [inline]
+ BUG: KASAN: use-after-free in rxrpc_put_peer+0x685/0x6a0
+ net/rxrpc/peer_object.c:435
+ Read of size 8 at addr ffff888097ec0058 by task syz-executor823/24216
+
+Fixes: 1159d4b496f5 ("rxrpc: Add a tracepoint to track rxrpc_peer refcounting")
+Reported-by: syzbot+b9be979c55f2bea8ed30@syzkaller.appspotmail.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/trace/events/rxrpc.h | 6 +++---
+ net/rxrpc/peer_object.c | 11 +++++++----
+ 2 files changed, 10 insertions(+), 7 deletions(-)
+
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -519,10 +519,10 @@ TRACE_EVENT(rxrpc_local,
+ );
+
+ TRACE_EVENT(rxrpc_peer,
+- TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op,
++ TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op,
+ int usage, const void *where),
+
+- TP_ARGS(peer, op, usage, where),
++ TP_ARGS(peer_debug_id, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer )
+@@ -532,7 +532,7 @@ TRACE_EVENT(rxrpc_peer,
+ ),
+
+ TP_fast_assign(
+- __entry->peer = peer->debug_id;
++ __entry->peer = peer_debug_id;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+--- a/net/rxrpc/peer_object.c
++++ b/net/rxrpc/peer_object.c
+@@ -382,7 +382,7 @@ struct rxrpc_peer *rxrpc_get_peer(struct
+ int n;
+
+ n = atomic_inc_return(&peer->usage);
+- trace_rxrpc_peer(peer, rxrpc_peer_got, n, here);
++ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
+ return peer;
+ }
+
+@@ -396,7 +396,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(
+ if (peer) {
+ int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
+ if (n > 0)
+- trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
++ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
+ else
+ peer = NULL;
+ }
+@@ -426,11 +426,13 @@ static void __rxrpc_put_peer(struct rxrp
+ void rxrpc_put_peer(struct rxrpc_peer *peer)
+ {
+ const void *here = __builtin_return_address(0);
++ unsigned int debug_id;
+ int n;
+
+ if (peer) {
++ debug_id = peer->debug_id;
+ n = atomic_dec_return(&peer->usage);
+- trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
++ trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
+ if (n == 0)
+ __rxrpc_put_peer(peer);
+ }
+@@ -443,10 +445,11 @@ void rxrpc_put_peer(struct rxrpc_peer *p
+ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
+ {
+ const void *here = __builtin_return_address(0);
++ unsigned int debug_id = peer->debug_id;
+ int n;
+
+ n = atomic_dec_return(&peer->usage);
+- trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
++ trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
+ if (n == 0) {
+ hash_del_rcu(&peer->hash_link);
+ list_del_init(&peer->keepalive_link);
diff --git a/patches.suse/rxrpc-rxrpc_peer-needs-to-hold-a-ref-on-the-rxrpc_lo.patch b/patches.suse/rxrpc-rxrpc_peer-needs-to-hold-a-ref-on-the-rxrpc_lo.patch
new file mode 100644
index 0000000000..d76c21c9fc
--- /dev/null
+++ b/patches.suse/rxrpc-rxrpc_peer-needs-to-hold-a-ref-on-the-rxrpc_lo.patch
@@ -0,0 +1,67 @@
+From: David Howells <dhowells@redhat.com>
+Date: Mon, 7 Oct 2019 10:58:29 +0100
+Subject: rxrpc: rxrpc_peer needs to hold a ref on the rxrpc_local record
+Patch-mainline: v5.4-rc4
+Git-commit: 9ebeddef58c41bd700419cdcece24cf64ce32276
+References: bsc#1154353
+
+The rxrpc_peer record needs to hold a reference on the rxrpc_local record
+it points to, as the peer is used as a base to access information in the
+rxrpc_local record.
+
+This can cause problems in __rxrpc_put_peer(), where we need the network
+namespace pointer, and in rxrpc_send_keepalive(), where we need to access
+the UDP socket, leading to symptoms like:
+
+ BUG: KASAN: use-after-free in __rxrpc_put_peer net/rxrpc/peer_object.c:411
+ [inline]
+ BUG: KASAN: use-after-free in rxrpc_put_peer+0x685/0x6a0
+ net/rxrpc/peer_object.c:435
+ Read of size 8 at addr ffff888097ec0058 by task syz-executor823/24216
+
+Fix this by taking a ref on the local record for the peer record.
+
+Fixes: ace45bec6d77 ("rxrpc: Fix firewall route keepalive")
+Fixes: 2baec2c3f854 ("rxrpc: Support network namespacing")
+Reported-by: syzbot+b9be979c55f2bea8ed30@syzkaller.appspotmail.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/rxrpc/peer_object.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/rxrpc/peer_object.c
++++ b/net/rxrpc/peer_object.c
+@@ -216,7 +216,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(stru
+ peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
+ if (peer) {
+ atomic_set(&peer->usage, 1);
+- peer->local = local;
++ peer->local = rxrpc_get_local(local);
+ INIT_HLIST_HEAD(&peer->error_targets);
+ peer->service_conns = RB_ROOT;
+ seqlock_init(&peer->service_conn_lock);
+@@ -307,7 +307,6 @@ void rxrpc_new_incoming_peer(struct rxrp
+ unsigned long hash_key;
+
+ hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+- peer->local = local;
+ rxrpc_init_peer(rx, peer, hash_key);
+
+ spin_lock(&rxnet->peer_hash_lock);
+@@ -417,6 +416,7 @@ static void __rxrpc_put_peer(struct rxrp
+ list_del_init(&peer->keepalive_link);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
+
++ rxrpc_put_local(peer->local);
+ kfree_rcu(peer, rcu);
+ }
+
+@@ -453,6 +453,7 @@ void rxrpc_put_peer_locked(struct rxrpc_
+ if (n == 0) {
+ hash_del_rcu(&peer->hash_link);
+ list_del_init(&peer->keepalive_link);
++ rxrpc_put_local(peer->local);
+ kfree_rcu(peer, rcu);
+ }
+ }
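
Editor's note (not part of the patch): the rule applied here is that a
structure which stores a pointer to another refcounted object for later
use must take its own reference on it, and drop that reference only when
the structure itself is destroyed. A hedged userspace sketch of the
pairing (hypothetical names, not the kernel's API):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct local {
            atomic_int usage;
    };

    struct peer {
            struct local *local;    /* dereferenced after allocation */
    };

    static struct local *get_local(struct local *l)
    {
            atomic_fetch_add(&l->usage, 1);
            return l;
    }

    static void put_local(struct local *l)
    {
            if (atomic_fetch_sub(&l->usage, 1) == 1)
                    free(l);        /* we held the last reference */
    }

    /* Take a ref when the pointer is stored... */
    static struct peer *alloc_peer(struct local *l)
    {
            struct peer *p = calloc(1, sizeof(*p));

            if (p)
                    p->local = get_local(l);
            return p;
    }

    /* ...and drop it only when the holder itself goes away. */
    static void free_peer(struct peer *p)
    {
            put_local(p->local);
            free(p);
    }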
diff --git a/patches.suse/sctp-add-chunks-to-sk_backlog-when-the-newsk-sk_sock.patch b/patches.suse/sctp-add-chunks-to-sk_backlog-when-the-newsk-sk_sock.patch
new file mode 100644
index 0000000000..03499f8653
--- /dev/null
+++ b/patches.suse/sctp-add-chunks-to-sk_backlog-when-the-newsk-sk_sock.patch
@@ -0,0 +1,120 @@
+From: Xin Long <lucien.xin@gmail.com>
+Date: Tue, 8 Oct 2019 19:09:23 +0800
+Subject: sctp: add chunks to sk_backlog when the newsk sk_socket is not set
+Patch-mainline: v5.4-rc4
+Git-commit: 819be8108fded0b9e710bbbf81193e52f7bab2f7
+References: bsc#1154353
+
+This patch is to fix a NULL-ptr deref in selinux_socket_connect_helper:
+
+ [...] kasan: GPF could be caused by NULL-ptr deref or user memory access
+ [...] RIP: 0010:selinux_socket_connect_helper+0x94/0x460
+ [...] Call Trace:
+ [...] selinux_sctp_bind_connect+0x16a/0x1d0
+ [...] security_sctp_bind_connect+0x58/0x90
+ [...] sctp_process_asconf+0xa52/0xfd0 [sctp]
+ [...] sctp_sf_do_asconf+0x785/0x980 [sctp]
+ [...] sctp_do_sm+0x175/0x5a0 [sctp]
+ [...] sctp_assoc_bh_rcv+0x285/0x5b0 [sctp]
+ [...] sctp_backlog_rcv+0x482/0x910 [sctp]
+ [...] __release_sock+0x11e/0x310
+ [...] release_sock+0x4f/0x180
+ [...] sctp_accept+0x3f9/0x5a0 [sctp]
+ [...] inet_accept+0xe7/0x720
+
+It was caused by the 'newsk' sk_socket not being set before reaching the
+security sctp hook when processing an asconf chunk with SCTP_PARAM_ADD_IP
+or SCTP_PARAM_SET_PRIMARY:
+
+ inet_accept()->
+ sctp_accept():
+ lock_sock():
+ lock listening 'sk'
+ do_softirq():
+ sctp_rcv(): <-- [1]
+ asconf chunk arrives and
+ enqueued in 'sk' backlog
+ sctp_sock_migrate():
+ set asoc's sk to 'newsk'
+ release_sock():
+ sctp_backlog_rcv():
+ lock 'newsk'
+ sctp_process_asconf() <-- [2]
+ unlock 'newsk'
+ sock_graft():
+ set sk_socket <-- [3]
+
+As this shows, at [1] the asconf chunk is put into the listening 'sk'
+backlog, as accept() is holding its sock lock. Then at [2] the asconf
+chunk is processed with 'newsk', as the asoc's sk had been set to 'newsk'.
+However, 'newsk' sk_socket is not set until [3], yet
+selinux_sctp_bind_connect() dereferences it, so the kernel crashed.
+
+Fix it by adding the chunk to the sk_backlog until the newsk sk_socket is
+set, which happens when .accept() is done.
+
+Note that sk->sk_socket can be NULL when the sock is closed, so the
+SOCK_DEAD flag also needs to be checked in sctp_newsk_ready().
+
+Thanks to Ondrej for reviewing the code.
+
+Fixes: d452930fd3b9 ("selinux: Add SCTP support")
+Reported-by: Ying Xu <yinxu@redhat.com>
+Suggested-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/sctp/sctp.h | 5 +++++
+ net/sctp/input.c | 12 +++++++++---
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -610,4 +610,9 @@ static inline __u32 sctp_min_frag_point(
+ return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
+ }
+
++static inline bool sctp_newsk_ready(const struct sock *sk)
++{
++ return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
++}
++
+ #endif /* __net_sctp_h__ */
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -243,7 +243,7 @@ int sctp_rcv(struct sk_buff *skb)
+ bh_lock_sock(sk);
+ }
+
+- if (sock_owned_by_user(sk)) {
++ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+ if (sctp_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+ sctp_chunk_free(chunk);
+@@ -321,7 +321,7 @@ int sctp_backlog_rcv(struct sock *sk, st
+ local_bh_disable();
+ bh_lock_sock(sk);
+
+- if (sock_owned_by_user(sk)) {
++ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+ if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+ sctp_chunk_free(chunk);
+ else
+@@ -336,7 +336,13 @@ int sctp_backlog_rcv(struct sock *sk, st
+ if (backloged)
+ return 0;
+ } else {
+- sctp_inq_push(inqueue, chunk);
++ if (!sctp_newsk_ready(sk)) {
++ if (!sk_add_backlog(sk, skb, sk->sk_rcvbuf))
++ return 0;
++ sctp_chunk_free(chunk);
++ } else {
++ sctp_inq_push(inqueue, chunk);
++ }
+ }
+
+ done:
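
Editor's note (not part of the patch): the shape of the fix is "if the
socket is not ready for this chunk, defer it to the backlog instead of
processing it now". A hedged, self-contained C sketch of the readiness
test, using stub types rather than the kernel's:

    #include <stdbool.h>
    #include <stddef.h>

    struct sock_model {
            bool dead;          /* stands in for sock_flag(sk, SOCK_DEAD) */
            void *sk_socket;    /* grafted by accept() via sock_graft() */
            bool owned_by_user; /* stands in for sock_owned_by_user() */
    };

    /* A closed sock has a NULL sk_socket as well, so "dead" must also
     * count as ready or closed sockets would be backlogged forever. */
    static bool newsk_ready(const struct sock_model *sk)
    {
            return sk->dead || sk->sk_socket != NULL;
    }

    /* Defer unless ready; backlogged chunks are replayed once the lock
     * owner calls release_sock(), by which time sk_socket is set. */
    static bool should_backlog(const struct sock_model *sk)
    {
            return sk->owned_by_user || !newsk_ready(sk);
    }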
diff --git a/patches.suse/tcp-add-rcu-protection-around-tp-fastopen_rsk.patch b/patches.suse/tcp-add-rcu-protection-around-tp-fastopen_rsk.patch
new file mode 100644
index 0000000000..3939a19490
--- /dev/null
+++ b/patches.suse/tcp-add-rcu-protection-around-tp-fastopen_rsk.patch
@@ -0,0 +1,275 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 10 Oct 2019 20:17:38 -0700
+Subject: tcp: add rcu protection around tp->fastopen_rsk
+Patch-mainline: v5.4-rc4
+Git-commit: d983ea6f16b835dcde2ee9a58a1e764ce68bfccc
+References: bsc#1154353
+
+Both tcp_v4_err() and tcp_v6_err() do the following operations
+without owning the socket lock:
+
+ fastopen = tp->fastopen_rsk;
+ snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
+
+The problem is that without an appropriate barrier, the compiler
+might reload tp->fastopen_rsk and trigger a NULL deref.
+
+Request sockets are protected by RCU, so we can simply add the
+missing annotations and barriers to solve the issue.
+
+Fixes: 168a8f58059a ("tcp: TCP Fast Open Server - main code path")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/linux/tcp.h | 6 +++---
+ net/core/request_sock.c | 2 +-
+ net/ipv4/inet_connection_sock.c | 4 ++--
+ net/ipv4/tcp.c | 11 ++++++++---
+ net/ipv4/tcp_fastopen.c | 2 +-
+ net/ipv4/tcp_input.c | 13 +++++++++----
+ net/ipv4/tcp_ipv4.c | 4 ++--
+ net/ipv4/tcp_minisocks.c | 2 +-
+ net/ipv4/tcp_output.c | 2 +-
+ net/ipv4/tcp_timer.c | 11 ++++++-----
+ net/ipv6/tcp_ipv6.c | 2 +-
+ 11 files changed, 35 insertions(+), 24 deletions(-)
+
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -393,7 +393,7 @@ struct tcp_sock {
+ /* fastopen_rsk points to request_sock that resulted in this big
+ * socket. Used to retransmit SYNACKs etc.
+ */
+- struct request_sock *fastopen_rsk;
++ struct request_sock __rcu *fastopen_rsk;
+ u32 *saved_syn;
+ };
+
+@@ -447,8 +447,8 @@ static inline struct tcp_timewait_sock *
+
+ static inline bool tcp_passive_fastopen(const struct sock *sk)
+ {
+- return (sk->sk_state == TCP_SYN_RECV &&
+- tcp_sk(sk)->fastopen_rsk != NULL);
++ return sk->sk_state == TCP_SYN_RECV &&
++ rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
+ }
+
+ static inline void fastopen_queue_tune(struct sock *sk, int backlog)
+--- a/net/core/request_sock.c
++++ b/net/core/request_sock.c
+@@ -96,7 +96,7 @@ void reqsk_fastopen_remove(struct sock *
+
+ fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;
+
+- tcp_sk(sk)->fastopen_rsk = NULL;
++ RCU_INIT_POINTER(tcp_sk(sk)->fastopen_rsk, NULL);
+ spin_lock_bh(&fastopenq->lock);
+ fastopenq->qlen--;
+ tcp_rsk(req)->tfo_listener = false;
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -906,7 +906,7 @@ static void inet_child_forget(struct soc
+ percpu_counter_inc(sk->sk_prot->orphan_count);
+
+ if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
+- BUG_ON(tcp_sk(child)->fastopen_rsk != req);
++ BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
+ BUG_ON(sk != req->rsk_listener);
+
+ /* Paranoid, to prevent race condition if
+@@ -915,7 +915,7 @@ static void inet_child_forget(struct soc
+ * Also to satisfy an assertion in
+ * tcp_v4_destroy_sock().
+ */
+- tcp_sk(child)->fastopen_rsk = NULL;
++ RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
+ }
+ inet_csk_destroy_sock(child);
+ }
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -543,7 +543,7 @@ __poll_t tcp_poll(struct file *file, str
+
+ /* Connected or passive Fast Open socket? */
+ if (state != TCP_SYN_SENT &&
+- (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
++ (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
+ int target = sock_rcvlowat(sk, 0, INT_MAX);
+
+ if (tp->urg_seq == tp->copied_seq &&
+@@ -2486,7 +2486,10 @@ adjudge_to_death:
+ }
+
+ if (sk->sk_state == TCP_CLOSE) {
+- struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
++ struct request_sock *req;
++
++ req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
++ lockdep_sock_is_held(sk));
+ /* We could get here with a non-NULL req if the socket is
+ * aborted (e.g., closed with unread data) before 3WHS
+ * finishes.
+@@ -3830,8 +3833,10 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
+
+ void tcp_done(struct sock *sk)
+ {
+- struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
++ struct request_sock *req;
+
++ req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
++ lockdep_sock_is_held(sk));
+ if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -253,7 +253,7 @@ static struct sock *tcp_fastopen_create_
+ */
+ tp = tcp_sk(child);
+
+- tp->fastopen_rsk = req;
++ rcu_assign_pointer(tp->fastopen_rsk, req);
+ tcp_rsk(req)->tfo_listener = true;
+
+ /* RFC1323: The window in SYN & SYN/ACK segments is never
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2666,7 +2666,7 @@ static void tcp_process_loss(struct sock
+ struct tcp_sock *tp = tcp_sk(sk);
+ bool recovered = !before(tp->snd_una, tp->high_seq);
+
+- if ((flag & FLAG_SND_UNA_ADVANCED || tp->fastopen_rsk) &&
++ if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) &&
+ tcp_try_undo_loss(sk, false))
+ return;
+
+@@ -2990,7 +2990,7 @@ void tcp_rearm_rto(struct sock *sk)
+ /* If the retrans timer is currently being used by Fast Open
+ * for SYN-ACK retrans purpose, stay put.
+ */
+- if (tp->fastopen_rsk)
++ if (rcu_access_pointer(tp->fastopen_rsk))
+ return;
+
+ if (!tp->packets_out) {
+@@ -6087,6 +6087,8 @@ reset_and_undo:
+
+ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
+ {
++ struct request_sock *req;
++
+ tcp_try_undo_loss(sk, false);
+
+ /* Reset rtx states to prevent spurious retransmits_timed_out() */
+@@ -6096,7 +6098,9 @@ static void tcp_rcv_synrecv_state_fastop
+ /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
+ * we no longer need req so release it.
+ */
+- reqsk_fastopen_remove(sk, tcp_sk(sk)->fastopen_rsk, false);
++ req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
++ lockdep_sock_is_held(sk));
++ reqsk_fastopen_remove(sk, req, false);
+
+ /* Re-arm the timer because data may have been sent out.
+ * This is similar to the regular data transmission case
+@@ -6171,7 +6175,8 @@ int tcp_rcv_state_process(struct sock *s
+
+ tcp_mstamp_refresh(tp);
+ tp->rx_opt.saw_tstamp = 0;
+- req = tp->fastopen_rsk;
++ req = rcu_dereference_protected(tp->fastopen_rsk,
++ lockdep_sock_is_held(sk));
+ if (req) {
+ bool req_stolen;
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -478,7 +478,7 @@ int tcp_v4_err(struct sk_buff *icmp_skb,
+ icsk = inet_csk(sk);
+ tp = tcp_sk(sk);
+ /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
+- fastopen = tp->fastopen_rsk;
++ fastopen = rcu_dereference(tp->fastopen_rsk);
+ snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
+ if (sk->sk_state != TCP_LISTEN &&
+ !between(seq, snd_una, tp->snd_nxt)) {
+@@ -2117,7 +2117,7 @@ void tcp_v4_destroy_sock(struct sock *sk
+ if (inet_csk(sk)->icsk_bind_hash)
+ inet_put_port(sk);
+
+- BUG_ON(tp->fastopen_rsk);
++ BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
+
+ /* If socket is aborted during connect operation */
+ tcp_free_fastopen_req(tp);
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -540,7 +540,7 @@ struct sock *tcp_create_openreq_child(co
+ newtp->rx_opt.mss_clamp = req->mss;
+ tcp_ecn_openreq_child(newtp, req);
+ newtp->fastopen_req = NULL;
+- newtp->fastopen_rsk = NULL;
++ RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
+
+ __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2471,7 +2471,7 @@ bool tcp_schedule_loss_probe(struct sock
+ /* Don't do any loss probe on a Fast Open connection before 3WHS
+ * finishes.
+ */
+- if (tp->fastopen_rsk)
++ if (rcu_access_pointer(tp->fastopen_rsk))
+ return false;
+
+ early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -386,15 +386,13 @@ abort: tcp_write_err(sk);
+ * Timer for Fast Open socket to retransmit SYNACK. Note that the
+ * sk here is the child socket, not the parent (listener) socket.
+ */
+-static void tcp_fastopen_synack_timer(struct sock *sk)
++static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ int max_retries = icsk->icsk_syn_retries ? :
+ sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
+ struct tcp_sock *tp = tcp_sk(sk);
+- struct request_sock *req;
+
+- req = tcp_sk(sk)->fastopen_rsk;
+ req->rsk_ops->syn_ack_timeout(req);
+
+ if (req->num_timeout >= max_retries) {
+@@ -435,11 +433,14 @@ void tcp_retransmit_timer(struct sock *s
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct net *net = sock_net(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
++ struct request_sock *req;
+
+- if (tp->fastopen_rsk) {
++ req = rcu_dereference_protected(tp->fastopen_rsk,
++ lockdep_sock_is_held(sk));
++ if (req) {
+ WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
+ sk->sk_state != TCP_FIN_WAIT1);
+- tcp_fastopen_synack_timer(sk);
++ tcp_fastopen_synack_timer(sk, req);
+ /* Before we receive ACK to our SYN-ACK don't retransmit
+ * anything else (e.g., data or FIN segments).
+ */
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -406,7 +406,7 @@ static int tcp_v6_err(struct sk_buff *sk
+
+ tp = tcp_sk(sk);
+ /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
+- fastopen = tp->fastopen_rsk;
++ fastopen = rcu_dereference(tp->fastopen_rsk);
+ snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
+ if (sk->sk_state != TCP_LISTEN &&
+ !between(seq, snd_una, tp->snd_nxt)) {
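
Editor's note (not part of the patch): the hazard fixed above is the
compiler re-reading the pointer between the NULL test and its use. A
hedged C11 model of the lockless read, with an atomic load standing in
for rcu_dereference() (hypothetical model types):

    #include <stdatomic.h>
    #include <stddef.h>

    struct request_sock_model { unsigned int snt_isn; };

    struct tcp_sock_model {
            _Atomic(struct request_sock_model *) fastopen_rsk;
            unsigned int snd_una;
    };

    /* Load the pointer exactly once; with a plain read the compiler
     * may legally reload fastopen_rsk after the NULL check and crash. */
    static unsigned int lockless_snd_una(struct tcp_sock_model *tp)
    {
            struct request_sock_model *req =
                    atomic_load_explicit(&tp->fastopen_rsk,
                                         memory_order_consume);

            return req ? req->snt_isn : tp->snd_una;
    }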
diff --git a/patches.suse/tcp-annotate-lockless-access-to-tcp_memory_pressure.patch b/patches.suse/tcp-annotate-lockless-access-to-tcp_memory_pressure.patch
new file mode 100644
index 0000000000..776fc0e653
--- /dev/null
+++ b/patches.suse/tcp-annotate-lockless-access-to-tcp_memory_pressure.patch
@@ -0,0 +1,54 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 9 Oct 2019 15:10:15 -0700
+Subject: tcp: annotate lockless access to tcp_memory_pressure
+Patch-mainline: v5.4-rc4
+Git-commit: 1f142c17d19a5618d5a633195a46f2c8be9bf232
+References: bsc#1154353
+
+tcp_memory_pressure is read without holding any lock,
+and its value could be changed on other cpus.
+
+Use READ_ONCE() to annotate these lockless reads.
+
+The write side is already using atomic ops.
+
+Fixes: b8da51ebb1aa ("tcp: introduce tcp_under_memory_pressure()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/tcp.h | 2 +-
+ net/ipv4/tcp.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -258,7 +258,7 @@ static inline bool tcp_under_memory_pres
+ mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ return true;
+
+- return tcp_memory_pressure;
++ return READ_ONCE(tcp_memory_pressure);
+ }
+ /*
+ * The next routines deal with comparing 32 bit unsigned ints
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -326,7 +326,7 @@ void tcp_enter_memory_pressure(struct so
+ {
+ unsigned long val;
+
+- if (tcp_memory_pressure)
++ if (READ_ONCE(tcp_memory_pressure))
+ return;
+ val = jiffies;
+
+@@ -341,7 +341,7 @@ void tcp_leave_memory_pressure(struct so
+ {
+ unsigned long val;
+
+- if (!tcp_memory_pressure)
++ if (!READ_ONCE(tcp_memory_pressure))
+ return;
+ val = xchg(&tcp_memory_pressure, 0);
+ if (val)
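
Editor's note (not part of the patch): READ_ONCE() is, roughly, a
volatile access that keeps the compiler from fusing, tearing or
re-reading the load. A hedged userspace approximation - the real macro
does more - relying on the GNU __typeof__ extension, as the kernel
itself does:

    /* Simplified model of the kernel's READ_ONCE(). */
    #define MY_READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    static unsigned long memory_pressure; /* flipped by other threads */

    static int under_pressure(void)
    {
            return MY_READ_ONCE(memory_pressure) != 0;
    }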
diff --git a/patches.suse/tcp-annotate-sk-sk_rcvbuf-lockless-reads.patch b/patches.suse/tcp-annotate-sk-sk_rcvbuf-lockless-reads.patch
new file mode 100644
index 0000000000..bac289266c
--- /dev/null
+++ b/patches.suse/tcp-annotate-sk-sk_rcvbuf-lockless-reads.patch
@@ -0,0 +1,145 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 10 Oct 2019 20:17:44 -0700
+Subject: tcp: annotate sk->sk_rcvbuf lockless reads
+Patch-mainline: v5.4-rc4
+Git-commit: ebb3b78db7bf842270a46fd4fe7cc45c78fa5ed6
+References: bsc#1154353
+
+For the sake of tcp_poll(), there are a few places where we fetch
+sk->sk_rcvbuf while this field can change from IRQ context or another cpu.
+
+We need to add READ_ONCE() annotations, and also make sure write
+sides use corresponding WRITE_ONCE() to avoid store-tearing.
+
+Note that other transports probably need similar fixes.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/tcp.h | 4 ++--
+ include/trace/events/sock.h | 2 +-
+ net/core/filter.c | 3 ++-
+ net/core/skbuff.c | 2 +-
+ net/core/sock.c | 5 +++--
+ net/ipv4/tcp.c | 4 ++--
+ net/ipv4/tcp_input.c | 7 ++++---
+ 7 files changed, 15 insertions(+), 12 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1380,14 +1380,14 @@ static inline int tcp_win_from_space(con
+ /* Note: caller must be prepared to deal with negative returns */
+ static inline int tcp_space(const struct sock *sk)
+ {
+- return tcp_win_from_space(sk, sk->sk_rcvbuf -
++ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
+ READ_ONCE(sk->sk_backlog.len) -
+ atomic_read(&sk->sk_rmem_alloc));
+ }
+
+ static inline int tcp_full_space(const struct sock *sk)
+ {
+- return tcp_win_from_space(sk, sk->sk_rcvbuf);
++ return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
+ }
+
+ extern void tcp_openreq_init_rwin(struct request_sock *req,
+--- a/include/trace/events/sock.h
++++ b/include/trace/events/sock.h
+@@ -82,7 +82,7 @@ TRACE_EVENT(sock_rcvqueue_full,
+ TP_fast_assign(
+ __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
+ __entry->truesize = skb->truesize;
+- __entry->sk_rcvbuf = sk->sk_rcvbuf;
++ __entry->sk_rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+ ),
+
+ TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4252,7 +4252,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_so
+ case SO_RCVBUF:
+ val = min_t(u32, val, sysctl_rmem_max);
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+- sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
++ WRITE_ONCE(sk->sk_rcvbuf,
++ max_t(int, val * 2, SOCK_MIN_RCVBUF));
+ break;
+ case SO_SNDBUF:
+ val = min_t(u32, val, sysctl_wmem_max);
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4409,7 +4409,7 @@ static void skb_set_err_queue(struct sk_
+ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+ {
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+- (unsigned int)sk->sk_rcvbuf)
++ (unsigned int)READ_ONCE(sk->sk_rcvbuf))
+ return -ENOMEM;
+
+ skb_orphan(skb);
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -831,7 +831,8 @@ set_rcvbuf:
+ * returning the value we actually used in getsockopt
+ * is the most desirable behavior.
+ */
+- sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
++ WRITE_ONCE(sk->sk_rcvbuf,
++ max_t(int, val * 2, SOCK_MIN_RCVBUF));
+ break;
+
+ case SO_RCVBUFFORCE:
+@@ -3204,7 +3205,7 @@ void sk_get_meminfo(const struct sock *s
+ memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
+
+ mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
+- mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
++ mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
+ mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
+ mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+ mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -451,7 +451,7 @@ void tcp_init_sock(struct sock *sk)
+ icsk->icsk_sync_mss = tcp_sync_mss;
+
+ sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
+- sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
++ WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+
+ sk_sockets_allocated_inc(sk);
+ sk->sk_route_forced_caps = NETIF_F_GSO;
+@@ -1710,7 +1710,7 @@ int tcp_set_rcvlowat(struct sock *sk, in
+
+ val <<= 1;
+ if (val > sk->sk_rcvbuf) {
+- sk->sk_rcvbuf = val;
++ WRITE_ONCE(sk->sk_rcvbuf, val);
+ tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
+ }
+ return 0;
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -483,8 +483,9 @@ static void tcp_clamp_window(struct sock
+ !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
+ !tcp_under_memory_pressure(sk) &&
+ sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
+- sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
+- net->ipv4.sysctl_tcp_rmem[2]);
++ WRITE_ONCE(sk->sk_rcvbuf,
++ min(atomic_read(&sk->sk_rmem_alloc),
++ net->ipv4.sysctl_tcp_rmem[2]));
+ }
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+ tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
+@@ -648,7 +649,7 @@ void tcp_rcv_space_adjust(struct sock *s
+ rcvbuf = min_t(u64, rcvwin * rcvmem,
+ sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+ if (rcvbuf > sk->sk_rcvbuf) {
+- sk->sk_rcvbuf = rcvbuf;
++ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+
+ /* Make the window clamp follow along. */
+ tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
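
Editor's note (not part of the patch): the reader-side READ_ONCE() only
closes the race if every writer that can run concurrently uses
WRITE_ONCE(), otherwise the store may still be torn. A hedged sketch of
the paired accessors (same __typeof__ caveat as above, model struct):

    #define MY_READ_ONCE(x)       (*(const volatile __typeof__(x) *)&(x))
    #define MY_WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

    struct sock_model { int rcvbuf; };

    /* Writer (e.g. the setsockopt path): one untorn store. */
    static void set_rcvbuf(struct sock_model *sk, int val)
    {
            MY_WRITE_ONCE(sk->rcvbuf, val);
    }

    /* Reader (e.g. the poll path, no lock held): one untorn load. */
    static int get_rcvbuf(const struct sock_model *sk)
    {
            return MY_READ_ONCE(sk->rcvbuf);
    }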
diff --git a/patches.suse/tcp-annotate-sk-sk_sndbuf-lockless-reads.patch b/patches.suse/tcp-annotate-sk-sk_sndbuf-lockless-reads.patch
new file mode 100644
index 0000000000..21cf150219
--- /dev/null
+++ b/patches.suse/tcp-annotate-sk-sk_sndbuf-lockless-reads.patch
@@ -0,0 +1,169 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 10 Oct 2019 20:17:45 -0700
+Subject: tcp: annotate sk->sk_sndbuf lockless reads
+Patch-mainline: v5.4-rc4
+Git-commit: e292f05e0df73f9fcc93329663936e1ded97a988
+References: bsc#1154353
+
+For the sake of tcp_poll(), there are a few places where we fetch
+sk->sk_sndbuf while this field can change from IRQ context or another cpu.
+
+We need to add READ_ONCE() annotations, and also make sure write
+sides use corresponding WRITE_ONCE() to avoid store-tearing.
+
+Note that other transports probably need similar fixes.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/sock.h | 18 +++++++++++-------
+ net/core/filter.c | 3 ++-
+ net/core/sock.c | 15 +++++++++------
+ net/ipv4/tcp.c | 2 +-
+ net/ipv4/tcp_input.c | 3 ++-
+ 5 files changed, 25 insertions(+), 16 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -883,7 +883,7 @@ static inline int sk_stream_min_wspace(c
+
+ static inline int sk_stream_wspace(const struct sock *sk)
+ {
+- return sk->sk_sndbuf - sk->sk_wmem_queued;
++ return READ_ONCE(sk->sk_sndbuf) - sk->sk_wmem_queued;
+ }
+
+ void sk_stream_write_space(struct sock *sk);
+@@ -1207,7 +1207,7 @@ static inline void sk_refcnt_debug_relea
+
+ static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
+ {
+- if (sk->sk_wmem_queued >= sk->sk_sndbuf)
++ if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
+ return false;
+
+ return sk->sk_prot->stream_memory_free ?
+@@ -2220,10 +2220,14 @@ static inline void sk_wake_async(const s
+
+ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+ {
+- if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
+- sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+- sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
+- }
++ u32 val;
++
++ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
++ return;
++
++ val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
++
++ WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
+ }
+
+ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+@@ -2251,7 +2255,7 @@ bool sk_page_frag_refill(struct sock *sk
+ */
+ static inline bool sock_writeable(const struct sock *sk)
+ {
+- return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
++ return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
+ }
+
+ static inline gfp_t gfp_any(void)
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4258,7 +4258,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_so
+ case SO_SNDBUF:
+ val = min_t(u32, val, sysctl_wmem_max);
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+- sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
++ WRITE_ONCE(sk->sk_sndbuf,
++ max_t(int, val * 2, SOCK_MIN_SNDBUF));
+ break;
+ case SO_MAX_PACING_RATE: /* 32bit version */
+ if (val != ~0U)
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -785,7 +785,8 @@ set_sndbuf:
+ */
+ val = min_t(int, val, INT_MAX / 2);
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+- sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
++ WRITE_ONCE(sk->sk_sndbuf,
++ max_t(int, val * 2, SOCK_MIN_SNDBUF));
+ /* Wake up sending tasks if we upped the value. */
+ sk->sk_write_space(sk);
+ break;
+@@ -2089,8 +2090,10 @@ EXPORT_SYMBOL(sock_i_ino);
+ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority)
+ {
+- if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
++ if (force ||
++ refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
+ struct sk_buff *skb = alloc_skb(size, priority);
++
+ if (skb) {
+ skb_set_owner_w(skb, sk);
+ return skb;
+@@ -2191,7 +2194,7 @@ static long sock_wait_for_wmem(struct so
+ break;
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+- if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
++ if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
+ break;
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ break;
+@@ -2226,7 +2229,7 @@ struct sk_buff *sock_alloc_send_pskb(str
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ goto failure;
+
+- if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
++ if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
+ break;
+
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+@@ -2807,7 +2810,7 @@ static void sock_def_write_space(struct
+ /* Do not wake up a writer until he can make "significant"
+ * progress. --DaveM
+ */
+- if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
++ if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
+ wq = rcu_dereference(sk->sk_wq);
+ if (skwq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+@@ -3207,7 +3210,7 @@ void sk_get_meminfo(const struct sock *s
+ mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
+ mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
+ mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
+- mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
++ mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
+ mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+ mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+ mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -450,7 +450,7 @@ void tcp_init_sock(struct sock *sk)
+
+ icsk->icsk_sync_mss = tcp_sync_mss;
+
+- sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
++ WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
+ WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+
+ sk_sockets_allocated_inc(sk);
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -359,7 +359,8 @@ static void tcp_sndbuf_expand(struct soc
+ sndmem *= nr_segs * per_mss;
+
+ if (sk->sk_sndbuf < sndmem)
+- sk->sk_sndbuf = min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]);
++ WRITE_ONCE(sk->sk_sndbuf,
++ min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
+ }
+
+ /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
diff --git a/patches.suse/tcp-annotate-sk-sk_wmem_queued-lockless-reads.patch b/patches.suse/tcp-annotate-sk-sk_wmem_queued-lockless-reads.patch
new file mode 100644
index 0000000000..fa74f1cf8c
--- /dev/null
+++ b/patches.suse/tcp-annotate-sk-sk_wmem_queued-lockless-reads.patch
@@ -0,0 +1,220 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 10 Oct 2019 20:17:46 -0700
+Subject: tcp: annotate sk->sk_wmem_queued lockless reads
+Patch-mainline: v5.4-rc4
+Git-commit: ab4e846a82d0ae00176de19f2db3c5c64f8eb5f2
+References: bsc#1154353
+
+For the sake of tcp_poll(), there are a few places where we fetch
+sk->sk_wmem_queued while this field can change from IRQ context or
+another cpu.
+
+We need to add READ_ONCE() annotations, and also make sure write
+sides use corresponding WRITE_ONCE() to avoid store-tearing.
+
+The sk_wmem_queued_add() helper is added so that we can convert to
+ADD_ONCE() or an equivalent in the future, if/when one becomes
+available.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/sock.h | 15 ++++++++++-----
+ include/trace/events/sock.h | 2 +-
+ net/core/datagram.c | 2 +-
+ net/core/sock.c | 2 +-
+ net/ipv4/inet_diag.c | 2 +-
+ net/ipv4/tcp.c | 4 ++--
+ net/ipv4/tcp_output.c | 14 +++++++-------
+ net/sched/em_meta.c | 2 +-
+ 8 files changed, 24 insertions(+), 19 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -878,12 +878,17 @@ static inline bool sk_acceptq_is_full(co
+ */
+ static inline int sk_stream_min_wspace(const struct sock *sk)
+ {
+- return sk->sk_wmem_queued >> 1;
++ return READ_ONCE(sk->sk_wmem_queued) >> 1;
+ }
+
+ static inline int sk_stream_wspace(const struct sock *sk)
+ {
+- return READ_ONCE(sk->sk_sndbuf) - sk->sk_wmem_queued;
++ return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
++}
++
++static inline void sk_wmem_queued_add(struct sock *sk, int val)
++{
++ WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
+ }
+
+ void sk_stream_write_space(struct sock *sk);
+@@ -1207,7 +1212,7 @@ static inline void sk_refcnt_debug_relea
+
+ static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
+ {
+- if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
++ if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
+ return false;
+
+ return sk->sk_prot->stream_memory_free ?
+@@ -1467,7 +1472,7 @@ DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cach
+ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+ {
+ sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+- sk->sk_wmem_queued -= skb->truesize;
++ sk_wmem_queued_add(sk, -skb->truesize);
+ sk_mem_uncharge(sk, skb->truesize);
+ if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
+ !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
+@@ -2014,7 +2019,7 @@ static inline int skb_copy_to_page_nocac
+ skb->len += copy;
+ skb->data_len += copy;
+ skb->truesize += copy;
+- sk->sk_wmem_queued += copy;
++ sk_wmem_queued_add(sk, copy);
+ sk_mem_charge(sk, copy);
+ return 0;
+ }
+--- a/include/trace/events/sock.h
++++ b/include/trace/events/sock.h
+@@ -115,7 +115,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
+ __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
+ __entry->sysctl_wmem = sk_get_wmem0(sk, prot);
+ __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
+- __entry->wmem_queued = sk->sk_wmem_queued;
++ __entry->wmem_queued = READ_ONCE(sk->sk_wmem_queued);
+ __entry->kind = kind;
+ ),
+
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -640,7 +640,7 @@ int __zerocopy_sg_from_iter(struct sock
+ skb->len += copied;
+ skb->truesize += truesize;
+ if (sk && sk->sk_type == SOCK_STREAM) {
+- sk->sk_wmem_queued += truesize;
++ sk_wmem_queued_add(sk, truesize);
+ sk_mem_charge(sk, truesize);
+ } else {
+ refcount_add(truesize, &skb->sk->sk_wmem_alloc);
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3212,7 +3212,7 @@ void sk_get_meminfo(const struct sock *s
+ mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
+ mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
+ mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+- mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
++ mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
+ mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+ mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
+ mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -193,7 +193,7 @@ int inet_sk_diag_fill(struct sock *sk, s
+ if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
+ struct inet_diag_meminfo minfo = {
+ .idiag_rmem = sk_rmem_alloc_get(sk),
+- .idiag_wmem = sk->sk_wmem_queued,
++ .idiag_wmem = READ_ONCE(sk->sk_wmem_queued),
+ .idiag_fmem = sk->sk_forward_alloc,
+ .idiag_tmem = sk_wmem_alloc_get(sk),
+ };
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -659,7 +659,7 @@ static void skb_entail(struct sock *sk,
+ tcb->sacked = 0;
+ __skb_header_release(skb);
+ tcp_add_write_queue_tail(sk, skb);
+- sk->sk_wmem_queued += skb->truesize;
++ sk_wmem_queued_add(sk, skb->truesize);
+ sk_mem_charge(sk, skb->truesize);
+ if (tp->nonagle & TCP_NAGLE_PUSH)
+ tp->nonagle &= ~TCP_NAGLE_PUSH;
+@@ -1034,7 +1034,7 @@ new_segment:
+ skb->len += copy;
+ skb->data_len += copy;
+ skb->truesize += copy;
+- sk->sk_wmem_queued += copy;
++ sk_wmem_queued_add(sk, copy);
+ sk_mem_charge(sk, copy);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1188,7 +1188,7 @@ static void tcp_queue_skb(struct sock *s
+ WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
+ __skb_header_release(skb);
+ tcp_add_write_queue_tail(sk, skb);
+- sk->sk_wmem_queued += skb->truesize;
++ sk_wmem_queued_add(sk, skb->truesize);
+ sk_mem_charge(sk, skb->truesize);
+ }
+
+@@ -1322,7 +1322,7 @@ int tcp_fragment(struct sock *sk, enum t
+ return -ENOMEM; /* We'll just try again later. */
+ skb_copy_decrypted(buff, skb);
+
+- sk->sk_wmem_queued += buff->truesize;
++ sk_wmem_queued_add(sk, buff->truesize);
+ sk_mem_charge(sk, buff->truesize);
+ nlen = skb->len - len - nsize;
+ buff->truesize += nlen;
+@@ -1432,7 +1432,7 @@ int tcp_trim_head(struct sock *sk, struc
+
+ if (delta_truesize) {
+ skb->truesize -= delta_truesize;
+- sk->sk_wmem_queued -= delta_truesize;
++ sk_wmem_queued_add(sk, -delta_truesize);
+ sk_mem_uncharge(sk, delta_truesize);
+ sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+ }
+@@ -1877,7 +1877,7 @@ static int tso_fragment(struct sock *sk,
+ return -ENOMEM;
+ skb_copy_decrypted(buff, skb);
+
+- sk->sk_wmem_queued += buff->truesize;
++ sk_wmem_queued_add(sk, buff->truesize);
+ sk_mem_charge(sk, buff->truesize);
+ buff->truesize += nlen;
+ skb->truesize -= nlen;
+@@ -2141,7 +2141,7 @@ static int tcp_mtu_probe(struct sock *sk
+ nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
+ if (!nskb)
+ return -1;
+- sk->sk_wmem_queued += nskb->truesize;
++ sk_wmem_queued_add(sk, nskb->truesize);
+ sk_mem_charge(sk, nskb->truesize);
+
+ skb = tcp_send_head(sk);
+@@ -3211,7 +3211,7 @@ int tcp_send_synack(struct sock *sk)
+ tcp_rtx_queue_unlink_and_free(skb, sk);
+ __skb_header_release(nskb);
+ tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
+- sk->sk_wmem_queued += nskb->truesize;
++ sk_wmem_queued_add(sk, nskb->truesize);
+ sk_mem_charge(sk, nskb->truesize);
+ skb = nskb;
+ }
+@@ -3436,7 +3436,7 @@ static void tcp_connect_queue_skb(struct
+
+ tcb->end_seq += skb->len;
+ __skb_header_release(skb);
+- sk->sk_wmem_queued += skb->truesize;
++ sk_wmem_queued_add(sk, skb->truesize);
+ sk_mem_charge(sk, skb->truesize);
+ WRITE_ONCE(tp->write_seq, tcb->end_seq);
+ tp->packets_out += tcp_skb_pcount(skb);
+--- a/net/sched/em_meta.c
++++ b/net/sched/em_meta.c
+@@ -446,7 +446,7 @@ META_COLLECTOR(int_sk_wmem_queued)
+ *err = -1;
+ return;
+ }
+- dst->value = sk->sk_wmem_queued;
++ dst->value = READ_ONCE(sk->sk_wmem_queued);
+ }
+
+ META_COLLECTOR(int_sk_fwd_alloc)
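
Editor's note (not part of the patch): sk_wmem_queued_add() funnels every
update through a single WRITE_ONCE() store. The read-modify-write is
still non-atomic, which is fine because all writers hold the socket lock;
only the lockless readers needed the annotation. A hedged model:

    #define MY_WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

    struct sock_model { int wmem_queued; };

    /* Callers serialize on the socket lock, so a plain read plus a
     * WRITE_ONCE() store suffices; concurrent lockless readers see
     * either the old or the new value, never a torn one. */
    static void wmem_queued_add(struct sock_model *sk, int val)
    {
            MY_WRITE_ONCE(sk->wmem_queued, sk->wmem_queued + val);
    }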
diff --git a/patches.suse/tcp-annotate-tp-copied_seq-lockless-reads.patch b/patches.suse/tcp-annotate-tp-copied_seq-lockless-reads.patch
new file mode 100644
index 0000000000..105a2089c3
--- /dev/null
+++ b/patches.suse/tcp-annotate-tp-copied_seq-lockless-reads.patch
@@ -0,0 +1,200 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 10 Oct 2019 20:17:40 -0700
+Subject: tcp: annotate tp->copied_seq lockless reads
+Patch-mainline: v5.4-rc4
+Git-commit: 7db48e983930285b765743ebd665aecf9850582b
+References: bsc#1154353
+
+There are a few places where we fetch tp->copied_seq while this field
+can change from IRQ context or another cpu.
+
+We need to add READ_ONCE() annotations, and also make
+sure write sides use corresponding WRITE_ONCE() to avoid
+store-tearing.
+
+Note that tcp_inq_hint() was already using READ_ONCE(tp->copied_seq)
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/ipv4/tcp.c | 20 ++++++++++----------
+ net/ipv4/tcp_diag.c | 3 ++-
+ net/ipv4/tcp_input.c | 6 +++---
+ net/ipv4/tcp_ipv4.c | 2 +-
+ net/ipv4/tcp_minisocks.c | 2 +-
+ net/ipv4/tcp_output.c | 2 +-
+ net/ipv6/tcp_ipv6.c | 2 +-
+ 7 files changed, 19 insertions(+), 18 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -477,7 +477,7 @@ static void tcp_tx_timestamp(struct sock
+ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
+ int target, struct sock *sk)
+ {
+- return (READ_ONCE(tp->rcv_nxt) - tp->copied_seq >= target) ||
++ return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
+ (sk->sk_prot->stream_memory_read ?
+ sk->sk_prot->stream_memory_read(sk) : false);
+ }
+@@ -546,7 +546,7 @@ __poll_t tcp_poll(struct file *file, str
+ (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
+ int target = sock_rcvlowat(sk, 0, INT_MAX);
+
+- if (tp->urg_seq == tp->copied_seq &&
++ if (tp->urg_seq == READ_ONCE(tp->copied_seq) &&
+ !sock_flag(sk, SOCK_URGINLINE) &&
+ tp->urg_data)
+ target++;
+@@ -607,7 +607,7 @@ int tcp_ioctl(struct sock *sk, int cmd,
+ unlock_sock_fast(sk, slow);
+ break;
+ case SIOCATMARK:
+- answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
++ answ = tp->urg_data && tp->urg_seq == READ_ONCE(tp->copied_seq);
+ break;
+ case SIOCOUTQ:
+ if (sk->sk_state == TCP_LISTEN)
+@@ -1667,9 +1667,9 @@ int tcp_read_sock(struct sock *sk, read_
+ sk_eat_skb(sk, skb);
+ if (!desc->count)
+ break;
+- tp->copied_seq = seq;
++ WRITE_ONCE(tp->copied_seq, seq);
+ }
+- tp->copied_seq = seq;
++ WRITE_ONCE(tp->copied_seq, seq);
+
+ tcp_rcv_space_adjust(sk);
+
+@@ -1818,7 +1818,7 @@ static int tcp_zerocopy_receive(struct s
+ out:
+ up_read(&current->mm->mmap_sem);
+ if (length) {
+- tp->copied_seq = seq;
++ WRITE_ONCE(tp->copied_seq, seq);
+ tcp_rcv_space_adjust(sk);
+
+ /* Clean up data we have read: This will do ACK frames. */
+@@ -2116,7 +2116,7 @@ found_ok_skb:
+ if (urg_offset < used) {
+ if (!urg_offset) {
+ if (!sock_flag(sk, SOCK_URGINLINE)) {
+- ++*seq;
++ WRITE_ONCE(*seq, *seq + 1);
+ urg_hole++;
+ offset++;
+ used--;
+@@ -2138,7 +2138,7 @@ found_ok_skb:
+ }
+ }
+
+- *seq += used;
++ WRITE_ONCE(*seq, *seq + used);
+ copied += used;
+ len -= used;
+
+@@ -2165,7 +2165,7 @@ skip_copy:
+
+ found_fin_ok:
+ /* Process the FIN. */
+- ++*seq;
++ WRITE_ONCE(*seq, *seq + 1);
+ if (!(flags & MSG_PEEK))
+ sk_eat_skb(sk, skb);
+ break;
+@@ -2587,7 +2587,7 @@ int tcp_disconnect(struct sock *sk, int
+ __kfree_skb(sk->sk_rx_skb_cache);
+ sk->sk_rx_skb_cache = NULL;
+ }
+- tp->copied_seq = tp->rcv_nxt;
++ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
+ tp->urg_data = 0;
+ tcp_write_queue_purge(sk);
+ tcp_fastopen_active_disable_ofo_check(sk);
+--- a/net/ipv4/tcp_diag.c
++++ b/net/ipv4/tcp_diag.c
+@@ -26,7 +26,8 @@ static void tcp_diag_get_info(struct soc
+ } else if (sk->sk_type == SOCK_STREAM) {
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+- r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) - tp->copied_seq, 0);
++ r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) -
++ READ_ONCE(tp->copied_seq), 0);
+ r->idiag_wqueue = tp->write_seq - tp->snd_una;
+ }
+ if (info)
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5961,7 +5961,7 @@ static int tcp_rcv_synsent_state_process
+ /* Remember, tcp_poll() does not lock socket!
+ * Change state from SYN-SENT only after copied_seq
+ * is initialized. */
+- tp->copied_seq = tp->rcv_nxt;
++ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
+
+ smc_check_reset_syn(tp);
+
+@@ -6036,7 +6036,7 @@ discard:
+ }
+
+ WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
+- tp->copied_seq = tp->rcv_nxt;
++ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
+ tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
+
+ /* RFC1323: The window in SYN & SYN/ACK segments is
+@@ -6216,7 +6216,7 @@ int tcp_rcv_state_process(struct sock *s
+ tcp_try_undo_spurious_syn(sk);
+ tp->retrans_stamp = 0;
+ tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
+- tp->copied_seq = tp->rcv_nxt;
++ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
+ }
+ smp_mb();
+ tcp_set_state(sk, TCP_ESTABLISHED);
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2452,7 +2452,7 @@ static void get_tcp4_sock(struct sock *s
+ * we might find a transient negative value.
+ */
+ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+- tp->copied_seq, 0);
++ READ_ONCE(tp->copied_seq), 0);
+
+ seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
+ "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -477,7 +477,7 @@ struct sock *tcp_create_openreq_child(co
+
+ seq = treq->rcv_isn + 1;
+ newtp->rcv_wup = seq;
+- newtp->copied_seq = seq;
++ WRITE_ONCE(newtp->copied_seq, seq);
+ WRITE_ONCE(newtp->rcv_nxt, seq);
+ newtp->segs_in = 1;
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3422,7 +3422,7 @@ static void tcp_connect_init(struct sock
+ else
+ tp->rcv_tstamp = tcp_jiffies32;
+ tp->rcv_wup = tp->rcv_nxt;
+- tp->copied_seq = tp->rcv_nxt;
++ WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
+
+ inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
+ inet_csk(sk)->icsk_retransmits = 0;
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1890,7 +1890,7 @@ static void get_tcp6_sock(struct seq_fil
+ * we might find a transient negative value.
+ */
+ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+- tp->copied_seq, 0);
++ READ_ONCE(tp->copied_seq), 0);
+
+ seq_printf(seq,
+ "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
diff --git a/patches.suse/tcp-annotate-tp-rcv_nxt-lockless-reads.patch b/patches.suse/tcp-annotate-tp-rcv_nxt-lockless-reads.patch
new file mode 100644
index 0000000000..5dae11fcca
--- /dev/null
+++ b/patches.suse/tcp-annotate-tp-rcv_nxt-lockless-reads.patch
@@ -0,0 +1,184 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 10 Oct 2019 20:17:39 -0700
+Subject: tcp: annotate tp->rcv_nxt lockless reads
+Patch-mainline: v5.4-rc4
+Git-commit: dba7d9b8c739df27ff3a234c81d6c6b23e3986fa
+References: bsc#1154353
+
+There are a few places where we fetch tp->rcv_nxt while this field
+can change from IRQ context or another cpu.
+
+We need to add READ_ONCE() annotations, and also make
+sure write sides use corresponding WRITE_ONCE() to avoid
+store-tearing.
+
+Note that tcp_inq_hint() was already using READ_ONCE(tp->rcv_nxt)
+
+syzbot reported:
+
+BUG: KCSAN: data-race in tcp_poll / tcp_queue_rcv
+
+write to 0xffff888120425770 of 4 bytes by interrupt on cpu 0:
+ tcp_rcv_nxt_update net/ipv4/tcp_input.c:3365 [inline]
+ tcp_queue_rcv+0x180/0x380 net/ipv4/tcp_input.c:4638
+ tcp_rcv_established+0xbf1/0xf50 net/ipv4/tcp_input.c:5616
+ tcp_v4_do_rcv+0x381/0x4e0 net/ipv4/tcp_ipv4.c:1542
+ tcp_v4_rcv+0x1a03/0x1bf0 net/ipv4/tcp_ipv4.c:1923
+ ip_protocol_deliver_rcu+0x51/0x470 net/ipv4/ip_input.c:204
+ ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252
+ dst_input include/net/dst.h:442 [inline]
+ ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5004
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5118
+ netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5208
+ napi_skb_finish net/core/dev.c:5671 [inline]
+ napi_gro_receive+0x28f/0x330 net/core/dev.c:5704
+ receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061
+
+read to 0xffff888120425770 of 4 bytes by task 7254 on cpu 1:
+ tcp_stream_is_readable net/ipv4/tcp.c:480 [inline]
+ tcp_poll+0x204/0x6b0 net/ipv4/tcp.c:554
+ sock_poll+0xed/0x250 net/socket.c:1256
+ vfs_poll include/linux/poll.h:90 [inline]
+ ep_item_poll.isra.0+0x90/0x190 fs/eventpoll.c:892
+ ep_send_events_proc+0x113/0x5c0 fs/eventpoll.c:1749
+ ep_scan_ready_list.constprop.0+0x189/0x500 fs/eventpoll.c:704
+ ep_send_events fs/eventpoll.c:1793 [inline]
+ ep_poll+0xe3/0x900 fs/eventpoll.c:1930
+ do_epoll_wait+0x162/0x180 fs/eventpoll.c:2294
+ __do_sys_epoll_pwait fs/eventpoll.c:2325 [inline]
+ __se_sys_epoll_pwait fs/eventpoll.c:2311 [inline]
+ __x64_sys_epoll_pwait+0xcd/0x170 fs/eventpoll.c:2311
+ do_syscall_64+0xcf/0x2f0 arch/x86/entry/common.c:296
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 1 PID: 7254 Comm: syz-fuzzer Not tainted 5.3.0+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/ipv4/tcp.c | 4 ++--
+ net/ipv4/tcp_diag.c | 2 +-
+ net/ipv4/tcp_input.c | 6 +++---
+ net/ipv4/tcp_ipv4.c | 3 ++-
+ net/ipv4/tcp_minisocks.c | 7 +++++--
+ net/ipv6/tcp_ipv6.c | 3 ++-
+ 6 files changed, 15 insertions(+), 10 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -477,7 +477,7 @@ static void tcp_tx_timestamp(struct sock
+ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
+ int target, struct sock *sk)
+ {
+- return (tp->rcv_nxt - tp->copied_seq >= target) ||
++ return (READ_ONCE(tp->rcv_nxt) - tp->copied_seq >= target) ||
+ (sk->sk_prot->stream_memory_read ?
+ sk->sk_prot->stream_memory_read(sk) : false);
+ }
+@@ -2934,7 +2934,7 @@ static int do_tcp_setsockopt(struct sock
+ else if (tp->repair_queue == TCP_SEND_QUEUE)
+ tp->write_seq = val;
+ else if (tp->repair_queue == TCP_RECV_QUEUE)
+- tp->rcv_nxt = val;
++ WRITE_ONCE(tp->rcv_nxt, val);
+ else
+ err = -EINVAL;
+ break;
+--- a/net/ipv4/tcp_diag.c
++++ b/net/ipv4/tcp_diag.c
+@@ -26,7 +26,7 @@ static void tcp_diag_get_info(struct soc
+ } else if (sk->sk_type == SOCK_STREAM) {
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+- r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
++ r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) - tp->copied_seq, 0);
+ r->idiag_wqueue = tp->write_seq - tp->snd_una;
+ }
+ if (info)
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3362,7 +3362,7 @@ static void tcp_rcv_nxt_update(struct tc
+
+ sock_owned_by_me((struct sock *)tp);
+ tp->bytes_received += delta;
+- tp->rcv_nxt = seq;
++ WRITE_ONCE(tp->rcv_nxt, seq);
+ }
+
+ /* Update our send window.
+@@ -5932,7 +5932,7 @@ static int tcp_rcv_synsent_state_process
+ /* Ok.. it's good. Set up sequence numbers and
+ * move to established.
+ */
+- tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
++ WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
+ tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
+
+ /* RFC1323: The window in SYN & SYN/ACK segments is
+@@ -6035,7 +6035,7 @@ discard:
+ tp->tcp_header_len = sizeof(struct tcphdr);
+ }
+
+- tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
++ WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
+ tp->copied_seq = tp->rcv_nxt;
+ tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2451,7 +2451,8 @@ static void get_tcp4_sock(struct sock *s
+ /* Because we don't lock the socket,
+ * we might find a transient negative value.
+ */
+- rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
++ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
++ tp->copied_seq, 0);
+
+ seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
+ "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -461,6 +461,7 @@ struct sock *tcp_create_openreq_child(co
+ struct tcp_request_sock *treq = tcp_rsk(req);
+ struct inet_connection_sock *newicsk;
+ struct tcp_sock *oldtp, *newtp;
++ u32 seq;
+
+ if (!newsk)
+ return NULL;
+@@ -474,8 +475,10 @@ struct sock *tcp_create_openreq_child(co
+ /* Now setup tcp_sock */
+ newtp->pred_flags = 0;
+
+- newtp->rcv_wup = newtp->copied_seq =
+- newtp->rcv_nxt = treq->rcv_isn + 1;
++ seq = treq->rcv_isn + 1;
++ newtp->rcv_wup = seq;
++ newtp->copied_seq = seq;
++ WRITE_ONCE(newtp->rcv_nxt, seq);
+ newtp->segs_in = 1;
+
+ newtp->snd_sml = newtp->snd_una =
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1889,7 +1889,8 @@ static void get_tcp6_sock(struct seq_fil
+ /* Because we don't lock the socket,
+ * we might find a transient negative value.
+ */
+- rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
++ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
++ tp->copied_seq, 0);
+
+ seq_printf(seq,
+ "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
diff --git a/patches.suse/tcp-annotate-tp-snd_nxt-lockless-reads.patch b/patches.suse/tcp-annotate-tp-snd_nxt-lockless-reads.patch
new file mode 100644
index 0000000000..3c141125a3
--- /dev/null
+++ b/patches.suse/tcp-annotate-tp-snd_nxt-lockless-reads.patch
@@ -0,0 +1,106 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 10 Oct 2019 20:17:42 -0700
+Subject: tcp: annotate tp->snd_nxt lockless reads
+Patch-mainline: v5.4-rc4
+Git-commit: e0d694d638dba768b47be31c22e1a9b4f862f561
+References: bsc#1154353
+
+There are a few places where we fetch tp->snd_nxt while
+this field can change from IRQ context or another CPU.
+
+We need to add READ_ONCE() annotations, and also make
+sure the write sides use the corresponding WRITE_ONCE()
+to avoid store-tearing.
+
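+A minimal sketch of the pattern applied throughout this patch,
+condensed from the hunks below:
+
+	/* writer side, under the socket lock */
+	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
+
+	/* lockless reader, e.g. tcp_ioctl(SIOCOUTQNSD) */
+	answ = READ_ONCE(tp->write_seq) - READ_ONCE(tp->snd_nxt);
+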
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/tcp.h | 3 ++-
+ net/ipv4/tcp.c | 3 ++-
+ net/ipv4/tcp_minisocks.c | 6 ++++--
+ net/ipv4/tcp_output.c | 10 +++++-----
+ 4 files changed, 13 insertions(+), 9 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1917,7 +1917,8 @@ static inline u32 tcp_notsent_lowat(cons
+ static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
+- u32 notsent_bytes = READ_ONCE(tp->write_seq) - tp->snd_nxt;
++ u32 notsent_bytes = READ_ONCE(tp->write_seq) -
++ READ_ONCE(tp->snd_nxt);
+
+ return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
+ }
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -625,7 +625,8 @@ int tcp_ioctl(struct sock *sk, int cmd,
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+ answ = 0;
+ else
+- answ = READ_ONCE(tp->write_seq) - tp->snd_nxt;
++ answ = READ_ONCE(tp->write_seq) -
++ READ_ONCE(tp->snd_nxt);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -481,8 +481,10 @@ struct sock *tcp_create_openreq_child(co
+ WRITE_ONCE(newtp->rcv_nxt, seq);
+ newtp->segs_in = 1;
+
+- newtp->snd_sml = newtp->snd_una =
+- newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
++ seq = treq->snt_isn + 1;
++ newtp->snd_sml = newtp->snd_una = seq;
++ WRITE_ONCE(newtp->snd_nxt, seq);
++ newtp->snd_up = seq;
+
+ INIT_LIST_HEAD(&newtp->tsq_node);
+ INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -67,7 +67,7 @@ static void tcp_event_new_data_sent(stru
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned int prior_packets = tp->packets_out;
+
+- tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
++ WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
+
+ __skb_unlink(skb, &sk->sk_write_queue);
+ tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
+@@ -3131,7 +3131,7 @@ void tcp_send_fin(struct sock *sk)
+ * if FIN had been sent. This is because retransmit path
+ * does not change tp->snd_nxt.
+ */
+- tp->snd_nxt++;
++ WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
+ return;
+ }
+ } else {
+@@ -3415,7 +3415,7 @@ static void tcp_connect_init(struct sock
+ tp->snd_una = tp->write_seq;
+ tp->snd_sml = tp->write_seq;
+ tp->snd_up = tp->write_seq;
+- tp->snd_nxt = tp->write_seq;
++ WRITE_ONCE(tp->snd_nxt, tp->write_seq);
+
+ if (likely(!tp->repair))
+ tp->rcv_nxt = 0;
+@@ -3575,11 +3575,11 @@ int tcp_connect(struct sock *sk)
+ /* We change tp->snd_nxt after the tcp_transmit_skb() call
+ * in order to make this packet get counted in tcpOutSegs.
+ */
+- tp->snd_nxt = tp->write_seq;
++ WRITE_ONCE(tp->snd_nxt, tp->write_seq);
+ tp->pushed_seq = tp->write_seq;
+ buff = tcp_send_head(sk);
+ if (unlikely(buff)) {
+- tp->snd_nxt = TCP_SKB_CB(buff)->seq;
++ WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
+ tp->pushed_seq = TCP_SKB_CB(buff)->seq;
+ }
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
diff --git a/patches.suse/tcp-annotate-tp-urg_seq-lockless-reads.patch b/patches.suse/tcp-annotate-tp-urg_seq-lockless-reads.patch
new file mode 100644
index 0000000000..bb46163bdd
--- /dev/null
+++ b/patches.suse/tcp-annotate-tp-urg_seq-lockless-reads.patch
@@ -0,0 +1,54 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 10 Oct 2019 20:17:43 -0700
+Subject: tcp: annotate tp->urg_seq lockless reads
+Patch-mainline: v5.4-rc4
+Git-commit: d9b55bf7b6788ec0bd1db1acefbc4feb1399144a
+References: bsc#1154353
+
+There are two places where we fetch tp->urg_seq while
+this field can change from IRQ context or another CPU.
+
+We need to add READ_ONCE() annotations, and also make
+sure the write side uses the corresponding WRITE_ONCE()
+to avoid store-tearing.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/ipv4/tcp.c | 5 +++--
+ net/ipv4/tcp_input.c | 2 +-
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -546,7 +546,7 @@ __poll_t tcp_poll(struct file *file, str
+ (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
+ int target = sock_rcvlowat(sk, 0, INT_MAX);
+
+- if (tp->urg_seq == READ_ONCE(tp->copied_seq) &&
++ if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
+ !sock_flag(sk, SOCK_URGINLINE) &&
+ tp->urg_data)
+ target++;
+@@ -607,7 +607,8 @@ int tcp_ioctl(struct sock *sk, int cmd,
+ unlock_sock_fast(sk, slow);
+ break;
+ case SIOCATMARK:
+- answ = tp->urg_data && tp->urg_seq == READ_ONCE(tp->copied_seq);
++ answ = tp->urg_data &&
++ READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
+ break;
+ case SIOCOUTQ:
+ if (sk->sk_state == TCP_LISTEN)
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5356,7 +5356,7 @@ static void tcp_check_urg(struct sock *s
+ }
+
+ tp->urg_data = TCP_URG_NOTYET;
+- tp->urg_seq = ptr;
++ WRITE_ONCE(tp->urg_seq, ptr);
+
+ /* Disable header prediction. */
+ tp->pred_flags = 0;
diff --git a/patches.suse/tcp-annotate-tp-write_seq-lockless-reads.patch b/patches.suse/tcp-annotate-tp-write_seq-lockless-reads.patch
new file mode 100644
index 0000000000..df9da78d5f
--- /dev/null
+++ b/patches.suse/tcp-annotate-tp-write_seq-lockless-reads.patch
@@ -0,0 +1,238 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 10 Oct 2019 20:17:41 -0700
+Subject: tcp: annotate tp->write_seq lockless reads
+Patch-mainline: v5.4-rc4
+Git-commit: 0f31746452e6793ad6271337438af8f4defb8940
+References: bsc#1154353
+
+There are a few places where we fetch tp->write_seq while
+this field can change from IRQ context or another CPU.
+
+We need to add READ_ONCE() annotations, and also make
+sure the write sides use the corresponding WRITE_ONCE()
+to avoid store-tearing.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/net/tcp.h | 2 +-
+ net/ipv4/tcp.c | 20 ++++++++++++--------
+ net/ipv4/tcp_diag.c | 2 +-
+ net/ipv4/tcp_ipv4.c | 21 ++++++++++++---------
+ net/ipv4/tcp_minisocks.c | 2 +-
+ net/ipv4/tcp_output.c | 4 ++--
+ net/ipv6/tcp_ipv6.c | 13 +++++++------
+ 7 files changed, 36 insertions(+), 28 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1917,7 +1917,7 @@ static inline u32 tcp_notsent_lowat(cons
+ static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
+- u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
++ u32 notsent_bytes = READ_ONCE(tp->write_seq) - tp->snd_nxt;
+
+ return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
+ }
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -616,7 +616,7 @@ int tcp_ioctl(struct sock *sk, int cmd,
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+ answ = 0;
+ else
+- answ = tp->write_seq - tp->snd_una;
++ answ = READ_ONCE(tp->write_seq) - tp->snd_una;
+ break;
+ case SIOCOUTQNSD:
+ if (sk->sk_state == TCP_LISTEN)
+@@ -625,7 +625,7 @@ int tcp_ioctl(struct sock *sk, int cmd,
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+ answ = 0;
+ else
+- answ = tp->write_seq - tp->snd_nxt;
++ answ = READ_ONCE(tp->write_seq) - tp->snd_nxt;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+@@ -1035,7 +1035,7 @@ new_segment:
+ sk->sk_wmem_queued += copy;
+ sk_mem_charge(sk, copy);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+- tp->write_seq += copy;
++ WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
+ TCP_SKB_CB(skb)->end_seq += copy;
+ tcp_skb_pcount_set(skb, 0);
+
+@@ -1361,7 +1361,7 @@ new_segment:
+ if (!copied)
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
+
+- tp->write_seq += copy;
++ WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
+ TCP_SKB_CB(skb)->end_seq += copy;
+ tcp_skb_pcount_set(skb, 0);
+
+@@ -2561,6 +2561,7 @@ int tcp_disconnect(struct sock *sk, int
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int old_state = sk->sk_state;
++ u32 seq;
+
+ if (old_state != TCP_CLOSE)
+ tcp_set_state(sk, TCP_CLOSE);
+@@ -2603,9 +2604,12 @@ int tcp_disconnect(struct sock *sk, int
+ tp->srtt_us = 0;
+ tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+ tp->rcv_rtt_last_tsecr = 0;
+- tp->write_seq += tp->max_window + 2;
+- if (tp->write_seq == 0)
+- tp->write_seq = 1;
++
++ seq = tp->write_seq + tp->max_window + 2;
++ if (!seq)
++ seq = 1;
++ WRITE_ONCE(tp->write_seq, seq);
++
+ icsk->icsk_backoff = 0;
+ tp->snd_cwnd = 2;
+ icsk->icsk_probes_out = 0;
+@@ -2932,7 +2936,7 @@ static int do_tcp_setsockopt(struct sock
+ if (sk->sk_state != TCP_CLOSE)
+ err = -EPERM;
+ else if (tp->repair_queue == TCP_SEND_QUEUE)
+- tp->write_seq = val;
++ WRITE_ONCE(tp->write_seq, val);
+ else if (tp->repair_queue == TCP_RECV_QUEUE)
+ WRITE_ONCE(tp->rcv_nxt, val);
+ else
+--- a/net/ipv4/tcp_diag.c
++++ b/net/ipv4/tcp_diag.c
+@@ -28,7 +28,7 @@ static void tcp_diag_get_info(struct soc
+
+ r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+ READ_ONCE(tp->copied_seq), 0);
+- r->idiag_wqueue = tp->write_seq - tp->snd_una;
++ r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una;
+ }
+ if (info)
+ tcp_get_info(sk, info);
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -164,9 +164,11 @@ int tcp_twsk_unique(struct sock *sk, str
+ * without appearing to create any others.
+ */
+ if (likely(!tp->repair)) {
+- tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
+- if (tp->write_seq == 0)
+- tp->write_seq = 1;
++ u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
++
++ if (!seq)
++ seq = 1;
++ WRITE_ONCE(tp->write_seq, seq);
+ tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
+ tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ }
+@@ -253,7 +255,7 @@ int tcp_v4_connect(struct sock *sk, stru
+ tp->rx_opt.ts_recent = 0;
+ tp->rx_opt.ts_recent_stamp = 0;
+ if (likely(!tp->repair))
+- tp->write_seq = 0;
++ WRITE_ONCE(tp->write_seq, 0);
+ }
+
+ inet->inet_dport = usin->sin_port;
+@@ -291,10 +293,11 @@ int tcp_v4_connect(struct sock *sk, stru
+
+ if (likely(!tp->repair)) {
+ if (!tp->write_seq)
+- tp->write_seq = secure_tcp_seq(inet->inet_saddr,
+- inet->inet_daddr,
+- inet->inet_sport,
+- usin->sin_port);
++ WRITE_ONCE(tp->write_seq,
++ secure_tcp_seq(inet->inet_saddr,
++ inet->inet_daddr,
++ inet->inet_sport,
++ usin->sin_port));
+ tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
+ inet->inet_saddr,
+ inet->inet_daddr);
+@@ -2457,7 +2460,7 @@ static void get_tcp4_sock(struct sock *s
+ seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
+ "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
+ i, src, srcp, dest, destp, state,
+- tp->write_seq - tp->snd_una,
++ READ_ONCE(tp->write_seq) - tp->snd_una,
+ rx_queue,
+ timer_active,
+ jiffies_delta_to_clock_t(timer_expires - jiffies),
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -497,7 +497,7 @@ struct sock *tcp_create_openreq_child(co
+ newtp->total_retrans = req->num_retrans;
+
+ tcp_init_xmit_timers(newsk);
+- newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
++ WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
+
+ if (sock_flag(newsk, SOCK_KEEPOPEN))
+ inet_csk_reset_keepalive_timer(newsk,
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1185,7 +1185,7 @@ static void tcp_queue_skb(struct sock *s
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* Advance write_seq and place onto the write_queue. */
+- tp->write_seq = TCP_SKB_CB(skb)->end_seq;
++ WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
+ __skb_header_release(skb);
+ tcp_add_write_queue_tail(sk, skb);
+ sk->sk_wmem_queued += skb->truesize;
+@@ -3438,7 +3438,7 @@ static void tcp_connect_queue_skb(struct
+ __skb_header_release(skb);
+ sk->sk_wmem_queued += skb->truesize;
+ sk_mem_charge(sk, skb->truesize);
+- tp->write_seq = tcb->end_seq;
++ WRITE_ONCE(tp->write_seq, tcb->end_seq);
+ tp->packets_out += tcp_skb_pcount(skb);
+ }
+
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -215,7 +215,7 @@ static int tcp_v6_connect(struct sock *s
+ !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
+ tp->rx_opt.ts_recent = 0;
+ tp->rx_opt.ts_recent_stamp = 0;
+- tp->write_seq = 0;
++ WRITE_ONCE(tp->write_seq, 0);
+ }
+
+ sk->sk_v6_daddr = usin->sin6_addr;
+@@ -311,10 +311,11 @@ static int tcp_v6_connect(struct sock *s
+
+ if (likely(!tp->repair)) {
+ if (!tp->write_seq)
+- tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
+- sk->sk_v6_daddr.s6_addr32,
+- inet->inet_sport,
+- inet->inet_dport);
++ WRITE_ONCE(tp->write_seq,
++ secure_tcpv6_seq(np->saddr.s6_addr32,
++ sk->sk_v6_daddr.s6_addr32,
++ inet->inet_sport,
++ inet->inet_dport));
+ tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
+ np->saddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32);
+@@ -1901,7 +1902,7 @@ static void get_tcp6_sock(struct seq_fil
+ dest->s6_addr32[0], dest->s6_addr32[1],
+ dest->s6_addr32[2], dest->s6_addr32[3], destp,
+ state,
+- tp->write_seq - tp->snd_una,
++ READ_ONCE(tp->write_seq) - tp->snd_una,
+ rx_queue,
+ timer_active,
+ jiffies_delta_to_clock_t(timer_expires - jiffies),
diff --git a/patches.suse/tcp-fix-a-possible-lockdep-splat-in-tcp_done.patch b/patches.suse/tcp-fix-a-possible-lockdep-splat-in-tcp_done.patch
new file mode 100644
index 0000000000..21cb3c0f4d
--- /dev/null
+++ b/patches.suse/tcp-fix-a-possible-lockdep-splat-in-tcp_done.patch
@@ -0,0 +1,40 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 14 Oct 2019 06:47:57 -0700
+Subject: tcp: fix a possible lockdep splat in tcp_done()
+Patch-mainline: v5.4-rc4
+Git-commit: cab209e571a9375f7dc6db69a6c40d2d98e57e3b
+References: bsc#1154353
+
+syzbot found that if __inet_inherit_port() returns an error,
+we call tcp_done() after inet_csk_prepare_forced_close(),
+meaning the socket lock is no longer held.
+
+We might fix this in a different way in net-next, but
+for 5.4 it seems safer to relax the lockdep check: passing a
+constant-true condition to rcu_dereference_protected() skips
+the lockdep assertion while keeping the sparse __rcu checking.
+
+Fixes: d983ea6f16b8 ("tcp: add rcu protection around tp->fastopen_rsk")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/ipv4/tcp.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3841,8 +3841,12 @@ void tcp_done(struct sock *sk)
+ {
+ struct request_sock *req;
+
+- req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+- lockdep_sock_is_held(sk));
++ /* We might be called with a new socket, after
++ * inet_csk_prepare_forced_close() has been called
++ * so we can not use lockdep_sock_is_held(sk)
++ */
++ req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
++
+ if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+
diff --git a/patches.suse/tcp-increase-tcp_max_syn_backlog-max-value.patch b/patches.suse/tcp-increase-tcp_max_syn_backlog-max-value.patch
new file mode 100644
index 0000000000..f39d9b94b8
--- /dev/null
+++ b/patches.suse/tcp-increase-tcp_max_syn_backlog-max-value.patch
@@ -0,0 +1,55 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 30 Oct 2019 10:05:46 -0700
+Subject: tcp: increase tcp_max_syn_backlog max value
+Patch-mainline: v5.4-rc6
+Git-commit: 623d0c2db02043e43b698fdd8de1bd398b8e7b37
+References: bsc#1154353
+
+The tcp_max_syn_backlog default value depends on memory size
+and TCP ehash size. Before this patch, the max value
+was 2048 [1], which is considered too small nowadays.
+
+Increase it to 4096 to match the recent SOMAXCONN change.
+
+[1] This is with TCP ehash size being capped to 524288 buckets.
+
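+As a worked example, assuming cnt equals that 524288-bucket cap:
+the old formula yields max(128, 524288 / 256) = 2048, while the
+new one yields max(128, 524288 / 128) = 4096.
+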
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willy Tarreau <w@1wt.eu>
+Cc: Yue Cao <ycao009@ucr.edu>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ Documentation/networking/ip-sysctl.txt | 7 +++++--
+ net/ipv4/tcp_ipv4.c | 2 +-
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -402,11 +402,14 @@ tcp_max_orphans - INTEGER
+ up to ~64K of unswappable memory.
+
+ tcp_max_syn_backlog - INTEGER
+- Maximal number of remembered connection requests, which have not
+- received an acknowledgment from connecting client.
++ Maximal number of remembered connection requests (SYN_RECV),
++ which have not received an acknowledgment from connecting client.
++ This is a per-listener limit.
+ The minimal value is 128 for low memory machines, and it will
+ increase in proportion to the memory of machine.
+ If server suffers from overload, try increasing this number.
++ Remember to also check /proc/sys/net/core/somaxconn
++ A SYN_RECV request socket consumes about 304 bytes of memory.
+
+ tcp_max_tw_buckets - INTEGER
+ Maximal number of timewait sockets held by system simultaneously.
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2676,7 +2676,7 @@ static int __net_init tcp_sk_init(struct
+ net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+ net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
+
+- net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
++ net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
+ net->ipv4.sysctl_tcp_sack = 1;
+ net->ipv4.sysctl_tcp_window_scaling = 1;
+ net->ipv4.sysctl_tcp_timestamps = 1;
diff --git a/patches.suse/team-fix-nested-locking-lockdep-warning.patch b/patches.suse/team-fix-nested-locking-lockdep-warning.patch
new file mode 100644
index 0000000000..5f21344fdf
--- /dev/null
+++ b/patches.suse/team-fix-nested-locking-lockdep-warning.patch
@@ -0,0 +1,140 @@
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Mon, 21 Oct 2019 18:47:54 +0000
+Subject: team: fix nested locking lockdep warning
+Patch-mainline: v5.4-rc6
+Git-commit: 369f61bee0f584aee09f0736431eb9b330c98571
+References: bsc#1154353
+
+A team interface can be nested, and its lock variable can be nested too.
+But this lock uses a static lockdep key, and there is no nested locking
+handling code such as mutex_lock_nested().
+So lockdep would warn about a circular locking scenario that
+cannot actually happen.
+To fix this, this patch makes the team module use a dynamic lock key
+instead of a static key.
+
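+The pattern, condensed from the hunks below:
+
+	/* one key per team instance, embedded in struct team */
+	lockdep_register_key(&team->team_lock_key);
+	__mutex_init(&team->lock, "team->team_lock_key",
+		     &team->team_lock_key);
+	/* ... */
+	lockdep_unregister_key(&team->team_lock_key);	/* at uninit */
+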
+Test commands:
+ ip link add team0 type team
+ ip link add team1 type team
+ ip link set team0 master team1
+ ip link set team0 nomaster
+ ip link set team1 master team0
+ ip link set team1 nomaster
+
+Splat that looks like:
+[ 40.364352] WARNING: possible recursive locking detected
+[ 40.364964] 5.4.0-rc3+ #96 Not tainted
+[ 40.365405] --------------------------------------------
+[ 40.365973] ip/750 is trying to acquire lock:
+[ 40.366542] ffff888060b34c40 (&team->lock){+.+.}, at: team_set_mac_address+0x151/0x290 [team]
+[ 40.367689]
+ but task is already holding lock:
+[ 40.368729] ffff888051201c40 (&team->lock){+.+.}, at: team_del_slave+0x29/0x60 [team]
+[ 40.370280]
+ other info that might help us debug this:
+[ 40.371159] Possible unsafe locking scenario:
+
+[ 40.371942] CPU0
+[ 40.372338] ----
+[ 40.372673] lock(&team->lock);
+[ 40.373115] lock(&team->lock);
+[ 40.373549]
+ *** DEADLOCK ***
+
+[ 40.374432] May be due to missing lock nesting notation
+
+[ 40.375338] 2 locks held by ip/750:
+[ 40.375851] #0: ffffffffabcc42b0 (rtnl_mutex){+.+.}, at: rtnetlink_rcv_msg+0x466/0x8a0
+[ 40.376927] #1: ffff888051201c40 (&team->lock){+.+.}, at: team_del_slave+0x29/0x60 [team]
+[ 40.377989]
+ stack backtrace:
+[ 40.378650] CPU: 0 PID: 750 Comm: ip Not tainted 5.4.0-rc3+ #96
+[ 40.379368] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
+[ 40.380574] Call Trace:
+[ 40.381208] dump_stack+0x7c/0xbb
+[ 40.381959] __lock_acquire+0x269d/0x3de0
+[ 40.382817] ? register_lock_class+0x14d0/0x14d0
+[ 40.383784] ? check_chain_key+0x236/0x5d0
+[ 40.384518] lock_acquire+0x164/0x3b0
+[ 40.385074] ? team_set_mac_address+0x151/0x290 [team]
+[ 40.385805] __mutex_lock+0x14d/0x14c0
+[ 40.386371] ? team_set_mac_address+0x151/0x290 [team]
+[ 40.387038] ? team_set_mac_address+0x151/0x290 [team]
+[ 40.387632] ? mutex_lock_io_nested+0x1380/0x1380
+[ 40.388245] ? team_del_slave+0x60/0x60 [team]
+[ 40.388752] ? rcu_read_lock_sched_held+0x90/0xc0
+[ 40.389304] ? rcu_read_lock_bh_held+0xa0/0xa0
+[ 40.389819] ? lock_acquire+0x164/0x3b0
+[ 40.390285] ? lockdep_rtnl_is_held+0x16/0x20
+[ 40.390797] ? team_port_get_rtnl+0x90/0xe0 [team]
+[ 40.391353] ? __module_text_address+0x13/0x140
+[ 40.391886] ? team_set_mac_address+0x151/0x290 [team]
+[ 40.392547] team_set_mac_address+0x151/0x290 [team]
+[ 40.393111] dev_set_mac_address+0x1f0/0x3f0
+[ ... ]
+
+Fixes: 3d249d4ca7d0 ("net: introduce ethernet teaming device")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/team/team.c | 16 +++++++++++++---
+ include/linux/if_team.h | 1 +
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1615,7 +1615,6 @@ static int team_init(struct net_device *
+ int err;
+
+ team->dev = dev;
+- mutex_init(&team->lock);
+ team_set_no_mode(team);
+
+ team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
+@@ -1642,6 +1641,9 @@ static int team_init(struct net_device *
+ goto err_options_register;
+ netif_carrier_off(dev);
+
++ lockdep_register_key(&team->team_lock_key);
++ __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
++
+ return 0;
+
+ err_options_register:
+@@ -1671,6 +1673,7 @@ static void team_uninit(struct net_devic
+ team_queue_override_fini(team);
+ mutex_unlock(&team->lock);
+ netdev_change_features(dev);
++ lockdep_unregister_key(&team->team_lock_key);
+ }
+
+ static void team_destructor(struct net_device *dev)
+@@ -1974,8 +1977,15 @@ static int team_del_slave(struct net_dev
+ err = team_port_del(team, port_dev);
+ mutex_unlock(&team->lock);
+
+- if (!err)
+- netdev_change_features(dev);
++ if (err)
++ return err;
++
++ if (netif_is_team_master(port_dev)) {
++ lockdep_unregister_key(&team->team_lock_key);
++ lockdep_register_key(&team->team_lock_key);
++ lockdep_set_class(&team->lock, &team->team_lock_key);
++ }
++ netdev_change_features(dev);
+
+ return err;
+ }
+--- a/include/linux/if_team.h
++++ b/include/linux/if_team.h
+@@ -223,6 +223,7 @@ struct team {
+ atomic_t count_pending;
+ struct delayed_work dw;
+ } mcast_rejoin;
++ struct lock_class_key team_lock_key;
+ long mode_priv[TEAM_MODE_PRIV_LONGS];
+ };
+
diff --git a/patches.suse/udp-fix-data-race-in-udp_set_dev_scratch.patch b/patches.suse/udp-fix-data-race-in-udp_set_dev_scratch.patch
new file mode 100644
index 0000000000..7e851b3ab1
--- /dev/null
+++ b/patches.suse/udp-fix-data-race-in-udp_set_dev_scratch.patch
@@ -0,0 +1,100 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 24 Oct 2019 11:43:31 -0700
+Subject: udp: fix data-race in udp_set_dev_scratch()
+Patch-mainline: v5.4-rc6
+Git-commit: a793183caa9afae907a0d7ddd2ffd57329369bf5
+References: bsc#1154353
+
+KCSAN reported a data-race in udp_set_dev_scratch() [1]
+
+The issue here is that we must not write over skb fields
+if the skb is shared. A similar issue has been fixed in commit
+89c22d8c3b27 ("net: Fix skb csum races when peeking")
+
+While we are at it, use a helper only dealing with
+udp_skb_scratch(skb)->csum_unnecessary, as this allows
+udp_set_dev_scratch() to be called once and thus inlined.
+
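+The core of the fix, condensed from the hunk below (the
+64-bit-only preprocessor guard is omitted here):
+
+	/* only touch the scratch area when the skb is not shared */
+	if (!skb_shared(skb))
+		udp_skb_scratch(skb)->csum_unnecessary = true;
+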
+[1]
+BUG: KCSAN: data-race in udp_set_dev_scratch / udpv6_recvmsg
+
+write to 0xffff888120278317 of 1 bytes by task 10411 on cpu 1:
+ udp_set_dev_scratch+0xea/0x200 net/ipv4/udp.c:1308
+ __first_packet_length+0x147/0x420 net/ipv4/udp.c:1556
+ first_packet_length+0x68/0x2a0 net/ipv4/udp.c:1579
+ udp_poll+0xea/0x110 net/ipv4/udp.c:2720
+ sock_poll+0xed/0x250 net/socket.c:1256
+ vfs_poll include/linux/poll.h:90 [inline]
+ do_select+0x7d0/0x1020 fs/select.c:534
+ core_sys_select+0x381/0x550 fs/select.c:677
+ do_pselect.constprop.0+0x11d/0x160 fs/select.c:759
+ __do_sys_pselect6 fs/select.c:784 [inline]
+ __se_sys_pselect6 fs/select.c:769 [inline]
+ __x64_sys_pselect6+0x12e/0x170 fs/select.c:769
+ do_syscall_64+0xcc/0x370 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+read to 0xffff888120278317 of 1 bytes by task 10413 on cpu 0:
+ udp_skb_csum_unnecessary include/net/udp.h:358 [inline]
+ udpv6_recvmsg+0x43e/0xe90 net/ipv6/udp.c:310
+ inet6_recvmsg+0xbb/0x240 net/ipv6/af_inet6.c:592
+ sock_recvmsg_nosec+0x5c/0x70 net/socket.c:871
+ ___sys_recvmsg+0x1a0/0x3e0 net/socket.c:2480
+ do_recvmmsg+0x19a/0x5c0 net/socket.c:2601
+ __sys_recvmmsg+0x1ef/0x200 net/socket.c:2680
+ __do_sys_recvmmsg net/socket.c:2703 [inline]
+ __se_sys_recvmmsg net/socket.c:2696 [inline]
+ __x64_sys_recvmmsg+0x89/0xb0 net/socket.c:2696
+ do_syscall_64+0xcc/0x370 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 10413 Comm: syz-executor.0 Not tainted 5.4.0-rc3+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Fixes: 2276f58ac589 ("udp: use a separate rx queue for packet reception")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/ipv4/udp.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1316,6 +1316,20 @@ static void udp_set_dev_scratch(struct s
+ scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
+ }
+
++static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
++{
++ /* We come here after udp_lib_checksum_complete() returned 0.
++ * This means that __skb_checksum_complete() might have
++ * set skb->csum_valid to 1.
++ * On 64bit platforms, we can set csum_unnecessary
++ * to true, but only if the skb is not shared.
++ */
++#if BITS_PER_LONG == 64
++ if (!skb_shared(skb))
++ udp_skb_scratch(skb)->csum_unnecessary = true;
++#endif
++}
++
+ static int udp_skb_truesize(struct sk_buff *skb)
+ {
+ return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
+@@ -1550,10 +1564,7 @@ static struct sk_buff *__first_packet_le
+ *total += skb->truesize;
+ kfree_skb(skb);
+ } else {
+- /* the csum related bits could be changed, refresh
+- * the scratch area
+- */
+- udp_set_dev_scratch(skb);
++ udp_skb_csum_unnecessary_set(skb);
+ break;
+ }
+ }
diff --git a/patches.suse/udp-use-skb_queue_empty_lockless.patch b/patches.suse/udp-use-skb_queue_empty_lockless.patch
new file mode 100644
index 0000000000..7d6a7d693a
--- /dev/null
+++ b/patches.suse/udp-use-skb_queue_empty_lockless.patch
@@ -0,0 +1,96 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 23 Oct 2019 22:44:49 -0700
+Subject: udp: use skb_queue_empty_lockless()
+Patch-mainline: v5.4-rc6
+Git-commit: 137a0dbe3426fd7bcfe3f8117b36a87b3590e4eb
+References: bsc#1154353
+
+syzbot reported a data-race [1].
+
+We should use skb_queue_empty_lockless() to document that we are
+not ensuring mutual exclusion, and to silence KCSAN.
+
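+For reference, the lockless variant reads the queue head with
+READ_ONCE() instead of a plain load, along the lines of:
+
+	static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+	{
+		return READ_ONCE(list->next) == (const struct sk_buff *)list;
+	}
+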
+[1]
+BUG: KCSAN: data-race in __skb_recv_udp / __udp_enqueue_schedule_skb
+
+write to 0xffff888122474b50 of 8 bytes by interrupt on cpu 0:
+ __skb_insert include/linux/skbuff.h:1852 [inline]
+ __skb_queue_before include/linux/skbuff.h:1958 [inline]
+ __skb_queue_tail include/linux/skbuff.h:1991 [inline]
+ __udp_enqueue_schedule_skb+0x2c1/0x410 net/ipv4/udp.c:1470
+ __udp_queue_rcv_skb net/ipv4/udp.c:1940 [inline]
+ udp_queue_rcv_one_skb+0x7bd/0xc70 net/ipv4/udp.c:2057
+ udp_queue_rcv_skb+0xb5/0x400 net/ipv4/udp.c:2074
+ udp_unicast_rcv_skb.isra.0+0x7e/0x1c0 net/ipv4/udp.c:2233
+ __udp4_lib_rcv+0xa44/0x17c0 net/ipv4/udp.c:2300
+ udp_rcv+0x2b/0x40 net/ipv4/udp.c:2470
+ ip_protocol_deliver_rcu+0x4d/0x420 net/ipv4/ip_input.c:204
+ ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252
+ dst_input include/net/dst.h:442 [inline]
+ ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5010
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5124
+ process_backlog+0x1d3/0x420 net/core/dev.c:5955
+
+read to 0xffff888122474b50 of 8 bytes by task 8921 on cpu 1:
+ skb_queue_empty include/linux/skbuff.h:1494 [inline]
+ __skb_recv_udp+0x18d/0x500 net/ipv4/udp.c:1653
+ udp_recvmsg+0xe1/0xb10 net/ipv4/udp.c:1712
+ inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838
+ sock_recvmsg_nosec+0x5c/0x70 net/socket.c:871
+ ___sys_recvmsg+0x1a0/0x3e0 net/socket.c:2480
+ do_recvmmsg+0x19a/0x5c0 net/socket.c:2601
+ __sys_recvmmsg+0x1ef/0x200 net/socket.c:2680
+ __do_sys_recvmmsg net/socket.c:2703 [inline]
+ __se_sys_recvmmsg net/socket.c:2696 [inline]
+ __x64_sys_recvmmsg+0x89/0xb0 net/socket.c:2696
+ do_syscall_64+0xcc/0x370 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 1 PID: 8921 Comm: syz-executor.4 Not tainted 5.4.0-rc3+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/ipv4/udp.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1577,7 +1577,7 @@ static int first_packet_length(struct so
+
+ spin_lock_bh(&rcvq->lock);
+ skb = __first_packet_length(sk, rcvq, &total);
+- if (!skb && !skb_queue_empty(sk_queue)) {
++ if (!skb && !skb_queue_empty_lockless(sk_queue)) {
+ spin_lock(&sk_queue->lock);
+ skb_queue_splice_tail_init(sk_queue, rcvq);
+ spin_unlock(&sk_queue->lock);
+@@ -1650,7 +1650,7 @@ struct sk_buff *__skb_recv_udp(struct so
+ return skb;
+ }
+
+- if (skb_queue_empty(sk_queue)) {
++ if (skb_queue_empty_lockless(sk_queue)) {
+ spin_unlock_bh(&queue->lock);
+ goto busy_check;
+ }
+@@ -1676,7 +1676,7 @@ busy_check:
+ break;
+
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
+- } while (!skb_queue_empty(sk_queue));
++ } while (!skb_queue_empty_lockless(sk_queue));
+
+ /* sk_queue is empty, reader_queue may contain peeked packets */
+ } while (timeo &&
diff --git a/patches.suse/vxlan-add-adjacent-link-to-limit-depth-level.patch b/patches.suse/vxlan-add-adjacent-link-to-limit-depth-level.patch
new file mode 100644
index 0000000000..668669561a
--- /dev/null
+++ b/patches.suse/vxlan-add-adjacent-link-to-limit-depth-level.patch
@@ -0,0 +1,210 @@
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Mon, 21 Oct 2019 18:47:57 +0000
+Subject: vxlan: add adjacent link to limit depth level
+Patch-mainline: v5.4-rc6
+Git-commit: 0ce1822c2a08f6e05e22239bcb1778dcc916c7bc
+References: bsc#1154353
+
+The current vxlan code doesn't limit the number of nested devices.
+Nested devices are handled recursively, and this routine needs a
+huge amount of stack memory, so an unlimited nesting depth can
+cause a stack overflow.
+
+In order to fix this issue, this patch adds adjacent links.
+The adjacent link APIs internally check the depth level.
+
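+In essence, condensed from the hunk below, the lower device is
+linked as an adjacent device, and the link fails once the depth
+limit is exceeded:
+
+	err = netdev_upper_dev_link(remote_dev, dev, extack);
+	if (err)
+		goto errout;
+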
+Test commands:
+ ip link add dummy0 type dummy
+ ip link add vxlan0 type vxlan id 0 group 239.1.1.1 dev dummy0 \
+ dstport 4789
+ for i in {1..100}
+ do
+ let A=$i-1
+ ip link add vxlan$i type vxlan id $i group 239.1.1.1 \
+ dev vxlan$A dstport 4789
+ done
+ ip link del dummy0
+
+The topmost link is vxlan100 and the lowest link is vxlan0.
+When vxlan0 is deleted, the upper devices are deleted recursively.
+This needs huge stack memory and so overflows the stack.
+
+Splat looks like:
+[ 229.628477] =============================================================================
+[ 229.629785] BUG page->ptl (Not tainted): Padding overwritten. 0x0000000026abf214-0x0000000091f6abb2
+[ 229.629785] -----------------------------------------------------------------------------
+[ 229.629785]
+[ 229.655439] ==================================================================
+[ 229.629785] INFO: Slab 0x00000000ff7cfda8 objects=19 used=19 fp=0x00000000fe33776c flags=0x200000000010200
+[ 229.655688] BUG: KASAN: stack-out-of-bounds in unmap_single_vma+0x25a/0x2e0
+[ 229.655688] Read of size 8 at addr ffff888113076928 by task vlan-network-in/2334
+[ 229.655688]
+[ 229.629785] Padding 0000000026abf214: 00 80 14 0d 81 88 ff ff 68 91 81 14 81 88 ff ff ........h.......
+[ 229.629785] Padding 0000000001e24790: 38 91 81 14 81 88 ff ff 68 91 81 14 81 88 ff ff 8.......h.......
+[ 229.629785] Padding 00000000b39397c8: 33 30 62 a7 ff ff ff ff ff eb 60 22 10 f1 ff 1f 30b.......`"....
+[ 229.629785] Padding 00000000bc98f53a: 80 60 07 13 81 88 ff ff 00 80 14 0d 81 88 ff ff .`..............
+[ 229.629785] Padding 000000002aa8123d: 68 91 81 14 81 88 ff ff f7 21 17 a7 ff ff ff ff h........!......
+[ 229.629785] Padding 000000001c8c2369: 08 81 14 0d 81 88 ff ff 03 02 00 00 00 00 00 00 ................
+[ 229.629785] Padding 000000004e290c5d: 21 90 a2 21 10 ed ff ff 00 00 00 00 00 fc ff df !..!............
+[ 229.629785] Padding 000000000e25d731: 18 60 07 13 81 88 ff ff c0 8b 13 05 81 88 ff ff .`..............
+[ 229.629785] Padding 000000007adc7ab3: b3 8a b5 41 00 00 00 00 ...A....
+[ 229.629785] FIX page->ptl: Restoring 0x0000000026abf214-0x0000000091f6abb2=0x5a
+[ ... ]
+
+Fixes: acaf4e70997f ("net: vxlan: when lower dev unregisters remove vxlan dev as well")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/vxlan.c | 53 ++++++++++++++++++++++++++++++++++++++++++----------
+ include/net/vxlan.h | 1
+ 2 files changed, 44 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -3566,10 +3566,13 @@ static int __vxlan_dev_create(struct net
+ {
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan = netdev_priv(dev);
++ struct net_device *remote_dev = NULL;
+ struct vxlan_fdb *f = NULL;
+ bool unregister = false;
++ struct vxlan_rdst *dst;
+ int err;
+
++ dst = &vxlan->default_dst;
+ err = vxlan_dev_configure(net, dev, conf, false, extack);
+ if (err)
+ return err;
+@@ -3577,14 +3580,14 @@ static int __vxlan_dev_create(struct net
+ dev->ethtool_ops = &vxlan_ethtool_ops;
+
+ /* create an fdb entry for a valid default destination */
+- if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
++ if (!vxlan_addr_any(&dst->remote_ip)) {
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+- &vxlan->default_dst.remote_ip,
++ &dst->remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ vxlan->cfg.dst_port,
+- vxlan->default_dst.remote_vni,
+- vxlan->default_dst.remote_vni,
+- vxlan->default_dst.remote_ifindex,
++ dst->remote_vni,
++ dst->remote_vni,
++ dst->remote_ifindex,
+ NTF_SELF, &f);
+ if (err)
+ return err;
+@@ -3595,26 +3598,41 @@ static int __vxlan_dev_create(struct net
+ goto errout;
+ unregister = true;
+
++ if (dst->remote_ifindex) {
++ remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
++ if (!remote_dev)
++ goto errout;
++
++ err = netdev_upper_dev_link(remote_dev, dev, extack);
++ if (err)
++ goto errout;
++ }
++
+ err = rtnl_configure_link(dev, NULL);
+ if (err)
+- goto errout;
++ goto unlink;
+
+ if (f) {
+- vxlan_fdb_insert(vxlan, all_zeros_mac,
+- vxlan->default_dst.remote_vni, f);
++ vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
+
+ /* notify default fdb entry */
+ err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
+ RTM_NEWNEIGH, true, extack);
+ if (err) {
+ vxlan_fdb_destroy(vxlan, f, false, false);
++ if (remote_dev)
++ netdev_upper_dev_unlink(remote_dev, dev);
+ goto unregister;
+ }
+ }
+
+ list_add(&vxlan->next, &vn->vxlan_list);
++ if (remote_dev)
++ dst->remote_dev = remote_dev;
+ return 0;
+-
++unlink:
++ if (remote_dev)
++ netdev_upper_dev_unlink(remote_dev, dev);
+ errout:
+ /* unregister_netdevice() destroys the default FDB entry with deletion
+ * notification. But the addition notification was not sent yet, so
+@@ -3932,11 +3950,12 @@ static int vxlan_changelink(struct net_d
+ struct netlink_ext_ack *extack)
+ {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+- struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct net_device *lowerdev;
+ struct vxlan_config conf;
++ struct vxlan_rdst *dst;
+ int err;
+
++ dst = &vxlan->default_dst;
+ err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
+ if (err)
+ return err;
+@@ -3946,6 +3965,11 @@ static int vxlan_changelink(struct net_d
+ if (err)
+ return err;
+
++ err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev,
++ extack);
++ if (err)
++ return err;
++
+ /* handle default dst entry */
+ if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
+ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
+@@ -3962,6 +3986,8 @@ static int vxlan_changelink(struct net_d
+ NTF_SELF, true, extack);
+ if (err) {
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
++ netdev_adjacent_change_abort(dst->remote_dev,
++ lowerdev, dev);
+ return err;
+ }
+ }
+@@ -3979,6 +4005,11 @@ static int vxlan_changelink(struct net_d
+ if (conf.age_interval != vxlan->cfg.age_interval)
+ mod_timer(&vxlan->age_timer, jiffies);
+
++ netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
++ if (lowerdev && lowerdev != dst->remote_dev)
++ dst->remote_dev = lowerdev;
++
++ netdev_update_lockdep_key(lowerdev);
+ vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
+ return 0;
+ }
+@@ -3991,6 +4022,8 @@ static void vxlan_dellink(struct net_dev
+
+ list_del(&vxlan->next);
+ unregister_netdevice_queue(dev, head);
++ if (vxlan->default_dst.remote_dev)
++ netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev);
+ }
+
+ static size_t vxlan_get_size(const struct net_device *dev)
+--- a/include/net/vxlan.h
++++ b/include/net/vxlan.h
+@@ -197,6 +197,7 @@ struct vxlan_rdst {
+ u8 offloaded:1;
+ __be32 remote_vni;
+ u32 remote_ifindex;
++ struct net_device *remote_dev;
+ struct list_head list;
+ struct rcu_head rcu;
+ struct dst_cache dst_cache;
diff --git a/patches.suse/vxlan-check-tun_info-options_len-properly.patch b/patches.suse/vxlan-check-tun_info-options_len-properly.patch
new file mode 100644
index 0000000000..4f67bfe803
--- /dev/null
+++ b/patches.suse/vxlan-check-tun_info-options_len-properly.patch
@@ -0,0 +1,36 @@
+From: Xin Long <lucien.xin@gmail.com>
+Date: Tue, 29 Oct 2019 01:24:32 +0800
+Subject: vxlan: check tun_info options_len properly
+Patch-mainline: v5.4-rc6
+Git-commit: eadf52cf1852196a1363044dcda22fa5d7f296f7
+References: bsc#1154353
+
+This patch improves the tun_info options_len check by dropping
+the skb when TUNNEL_VXLAN_OPT is set but options_len is smaller
+than struct vxlan_metadata. This avoids a potential out-of-bounds
+access on ip_tun_info.
+
+Fixes: ee122c79d422 ("vxlan: Flow based tunneling")
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/vxlan.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2487,9 +2487,11 @@ static void vxlan_xmit_one(struct sk_buf
+ vni = tunnel_id_to_key32(info->key.tun_id);
+ ifindex = 0;
+ dst_cache = &info->dst_cache;
+- if (info->options_len &&
+- info->key.tun_flags & TUNNEL_VXLAN_OPT)
++ if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
++ if (info->options_len < sizeof(*md))
++ goto drop;
+ md = ip_tunnel_info_opts(info);
++ }
+ ttl = info->key.ttl;
+ tos = info->key.tos;
+ label = info->key.label;
diff --git a/patches.suse/vxlan-fix-unexpected-failure-of-vxlan_changelink.patch b/patches.suse/vxlan-fix-unexpected-failure-of-vxlan_changelink.patch
new file mode 100644
index 0000000000..5a0998acf3
--- /dev/null
+++ b/patches.suse/vxlan-fix-unexpected-failure-of-vxlan_changelink.patch
@@ -0,0 +1,56 @@
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Wed, 30 Oct 2019 08:15:12 +0000
+Subject: vxlan: fix unexpected failure of vxlan_changelink()
+Patch-mainline: v5.4-rc6
+Git-commit: c6761cf521f9bffbdcbb619dba665ebf3bcefb1e
+References: bsc#1154353
+
+After commit 0ce1822c2a08 ("vxlan: add adjacent link to limit depth
+level"), vxlan_changelink() could fail because of
+netdev_adjacent_change_prepare().
+netdev_adjacent_change_prepare() returns -EEXIST when the old lower
+device and the new lower device are the same.
+(The old lower device is "dst->remote_dev" and the new one is "lowerdev".)
+So, before calling it, lowerdev should be set to NULL if the two
+devices are the same.
+
+Test command1:
+ ip link add dummy0 type dummy
+ ip link add vxlan0 type vxlan dev dummy0 dstport 4789 vni 1
+ ip link set vxlan0 type vxlan ttl 5
+ RTNETLINK answers: File exists
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Fixes: 0ce1822c2a08 ("vxlan: add adjacent link to limit depth level")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/vxlan.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -3967,6 +3967,9 @@ static int vxlan_changelink(struct net_d
+ if (err)
+ return err;
+
++ if (dst->remote_dev == lowerdev)
++ lowerdev = NULL;
++
+ err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev,
+ extack);
+ if (err)
+@@ -4008,10 +4011,10 @@ static int vxlan_changelink(struct net_d
+ mod_timer(&vxlan->age_timer, jiffies);
+
+ netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
+- if (lowerdev && lowerdev != dst->remote_dev)
++ if (lowerdev && lowerdev != dst->remote_dev) {
+ dst->remote_dev = lowerdev;
+-
+- netdev_update_lockdep_key(lowerdev);
++ netdev_update_lockdep_key(lowerdev);
++ }
+ vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
+ return 0;
+ }
diff --git a/patches.suse/xdp-Handle-device-unregister-for-devmap_hash-map-typ.patch b/patches.suse/xdp-Handle-device-unregister-for-devmap_hash-map-typ.patch
new file mode 100644
index 0000000000..bfc202f1a7
--- /dev/null
+++ b/patches.suse/xdp-Handle-device-unregister-for-devmap_hash-map-typ.patch
@@ -0,0 +1,74 @@
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Sat, 19 Oct 2019 13:19:31 +0200
+Subject: xdp: Handle device unregister for devmap_hash map type
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.4-rc6
+Git-commit: ce197d83a9fc42795c248c90983bf05faf0f013b
+References: bsc#1154353
+
+It seems I forgot to add handling of devmap_hash type maps to the device
+unregister hook for devmaps. This omission prevents devices from being
+properly released, which causes hangs.
+
+Fix this by adding the missing handler.
+
+Fixes: 6f9d451ab1a3 ("xdp: Add devmap_hash map type for looking up devices by hashed index")
+Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/20191019111931.2981954-1-toke@redhat.com
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ kernel/bpf/devmap.c | 31 +++++++++++++++++++++++++++++++
+ 1 file changed, 31 insertions(+)
+
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -719,6 +719,32 @@ const struct bpf_map_ops dev_map_hash_op
+ .map_check_btf = map_check_no_btf,
+ };
+
++static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
++ struct net_device *netdev)
++{
++ unsigned long flags;
++ u32 i;
++
++ spin_lock_irqsave(&dtab->index_lock, flags);
++ for (i = 0; i < dtab->n_buckets; i++) {
++ struct bpf_dtab_netdev *dev;
++ struct hlist_head *head;
++ struct hlist_node *next;
++
++ head = dev_map_index_hash(dtab, i);
++
++ hlist_for_each_entry_safe(dev, next, head, index_hlist) {
++ if (netdev != dev->dev)
++ continue;
++
++ dtab->items--;
++ hlist_del_rcu(&dev->index_hlist);
++ call_rcu(&dev->rcu, __dev_map_entry_free);
++ }
++ }
++ spin_unlock_irqrestore(&dtab->index_lock, flags);
++}
++
+ static int dev_map_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+ {
+@@ -735,6 +761,11 @@ static int dev_map_notification(struct n
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(dtab, &dev_map_list, list) {
++ if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
++ dev_map_hash_remove_netdev(dtab, netdev);
++ continue;
++ }
++
+ for (i = 0; i < dtab->map.max_entries; i++) {
+ struct bpf_dtab_netdev *dev, *odev;
+
diff --git a/patches.suse/xdp-Prevent-overflow-in-devmap_hash-cost-calculation.patch b/patches.suse/xdp-Prevent-overflow-in-devmap_hash-cost-calculation.patch
new file mode 100644
index 0000000000..5e9bb3a895
--- /dev/null
+++ b/patches.suse/xdp-Prevent-overflow-in-devmap_hash-cost-calculation.patch
@@ -0,0 +1,37 @@
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Thu, 17 Oct 2019 12:57:02 +0200
+Subject: xdp: Prevent overflow in devmap_hash cost calculation for 32-bit
+ builds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.4-rc6
+Git-commit: 05679ca6feebc1ef3bf743563315d9975adcf6fb
+References: bsc#1154353
+
+Tetsuo pointed out that without an explicit cast, the cost calculation for
+devmap_hash type maps could overflow on 32-bit builds. This adds the
+missing cast.
+
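+As an illustration: on a 32-bit build where sizeof(struct
+hlist_head) is 4, n_buckets = 1 << 30 makes the unwidened product
+wrap to 0 before it is added to the u64 cost (numbers chosen for
+illustration only).
+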
+Fixes: 6f9d451ab1a3 ("xdp: Add devmap_hash map type for looking up devices by hashed index")
+Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Yonghong Song <yhs@fb.com>
+Link: https://lore.kernel.org/bpf/20191017105702.2807093-1-toke@redhat.com
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ kernel/bpf/devmap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -128,7 +128,7 @@ static int dev_map_init_map(struct bpf_d
+
+ if (!dtab->n_buckets) /* Overflow check */
+ return -EINVAL;
+- cost += sizeof(struct hlist_head) * dtab->n_buckets;
++ cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
+ }
+
+ /* if map size is larger than memlock limit, reject it */
diff --git a/patches.suse/xsk-Fix-registration-of-Rx-only-sockets.patch b/patches.suse/xsk-Fix-registration-of-Rx-only-sockets.patch
new file mode 100644
index 0000000000..ffb47afacd
--- /dev/null
+++ b/patches.suse/xsk-Fix-registration-of-Rx-only-sockets.patch
@@ -0,0 +1,53 @@
+From: Magnus Karlsson <magnus.karlsson@intel.com>
+Date: Mon, 21 Oct 2019 10:16:58 +0200
+Subject: xsk: Fix registration of Rx-only sockets
+Patch-mainline: v5.4-rc6
+Git-commit: 2afd23f78f39da84937006ecd24aa664a4ab052b
+References: bsc#1154353
+
+Having Rx-only AF_XDP sockets can potentially crash the system
+through a NULL pointer dereference in xsk_umem_consume_tx(). This
+function iterates through a list of all sockets tied to a umem and
+checks if there are any packets to send on the Tx ring. Rx-only
+sockets do not have a Tx ring, so this will cause a NULL pointer
+dereference. This will happen if you have registered one or more
+Rx-only sockets to a umem and the driver is checking the Tx ring even
+on Rx, or if the XDP_SHARED_UMEM mode is used and there is a mix of
+Rx-only and other sockets tied to the same umem.
+
+Fixed by only putting sockets with a Tx component on the list that
+xsk_umem_consume_tx() iterates over.
+
+Fixes: ac98d8aab61b ("xsk: wire upp Tx zero-copy functions")
+Reported-by: Kal Cutter Conley <kal.conley@dectris.com>
+Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
+Link: https://lore.kernel.org/bpf/1571645818-16244-1-git-send-email-magnus.karlsson@intel.com
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ net/xdp/xdp_umem.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/xdp/xdp_umem.c
++++ b/net/xdp/xdp_umem.c
+@@ -27,6 +27,9 @@ void xdp_add_sk_umem(struct xdp_umem *um
+ {
+ unsigned long flags;
+
++ if (!xs->tx)
++ return;
++
+ spin_lock_irqsave(&umem->xsk_list_lock, flags);
+ list_add_rcu(&xs->list, &umem->xsk_list);
+ spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
+@@ -36,6 +39,9 @@ void xdp_del_sk_umem(struct xdp_umem *um
+ {
+ unsigned long flags;
+
++ if (!xs->tx)
++ return;
++
+ spin_lock_irqsave(&umem->xsk_list_lock, flags);
+ list_del_rcu(&xs->list);
+ spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
diff --git a/series.conf b/series.conf
index ccf512f58d..0c223418c2 100644
--- a/series.conf
+++ b/series.conf
@@ -2624,10 +2624,16 @@
patches.suse/ocfs2-fix-panic-due-to-ocfs2_wq-is-null.patch
patches.suse/zram-fix-race-between-backing_dev_show-and-backing_d.patch
patches.suse/net-dsa-b53-Do-not-clear-existing-mirrored-port-mask.patch
+ patches.suse/rxrpc-Fix-call-ref-leak.patch
+ patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-peer-re.patch
+ patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-connect.patch
+ patches.suse/rxrpc-Fix-trace-after-put-looking-at-the-put-call-re.patch
+ patches.suse/rxrpc-rxrpc_peer-needs-to-hold-a-ref-on-the-rxrpc_lo.patch
patches.suse/llc-fix-sk_buff-leak-in-llc_sap_state_process.patch
patches.suse/llc-fix-sk_buff-leak-in-llc_conn_service.patch
patches.suse/llc-fix-another-potential-sk_buff-leak-in-llc_ui_sen.patch
patches.suse/llc-fix-sk_buff-refcounting-in-llc_conn_state_proces.patch
+ patches.suse/net-mlx5-DR-Allow-insertion-of-duplicate-rules.patch
patches.suse/net_sched-fix-backward-compatibility-for-TCA_KIND.patch
patches.suse/net_sched-fix-backward-compatibility-for-TCA_ACT_KIN.patch
patches.suse/NFC-pn533-fix-use-after-free-and-memleaks.patch
@@ -2635,12 +2641,31 @@
patches.suse/cfg80211-wext-avoid-copying-malformed-SSIDs.patch
patches.suse/nl80211-fix-memory-leak-in-nl80211_get_ftm_responder.patch
patches.suse/mac80211-accept-deauth-frames-in-IBSS-mode.patch
+ patches.suse/ip6erspan-remove-the-incorrect-mtu-limit-for-ip6ersp.patch
patches.suse/bonding-fix-potential-NULL-deref-in-bond_update_slav.patch
+ patches.suse/sctp-add-chunks-to-sk_backlog-when-the-newsk-sk_sock.patch
patches.suse/phylink-fix-kernel-doc-warnings.patch
patches.suse/act_mirred-Fix-mirred_init_module-error-handling.patch
+ patches.suse/netns-fix-NLM_F_ECHO-mechanism-for-RTM_NEWNSID.patch
+ patches.suse/netfilter-conntrack-avoid-possible-false-sharing.patch
patches.suse/tun-remove-possible-false-sharing-in-tun_flow_update.patch
+ patches.suse/net-avoid-possible-false-sharing-in-sk_leave_memory_.patch
+ patches.suse/net-add-READ-WRITE-_ONCE-annotations-on-rskq_accept_.patch
+ patches.suse/tcp-annotate-lockless-access-to-tcp_memory_pressure.patch
+ patches.suse/net-silence-KCSAN-warnings-around-sk_add_backlog-cal.patch
+ patches.suse/net-annotate-sk-sk_rcvlowat-lockless-reads.patch
+ patches.suse/net-silence-KCSAN-warnings-about-sk-sk_backlog.len-r.patch
patches.suse/rxrpc-Fix-possible-NULL-pointer-access-in-ICMP-handl.patch
patches.suse/xsk-Fix-crash-in-poll-when-device-does-not-support-n.patch
+ patches.suse/tcp-add-rcu-protection-around-tp-fastopen_rsk.patch
+ patches.suse/tcp-annotate-tp-rcv_nxt-lockless-reads.patch
+ patches.suse/tcp-annotate-tp-copied_seq-lockless-reads.patch
+ patches.suse/tcp-annotate-tp-write_seq-lockless-reads.patch
+ patches.suse/tcp-annotate-tp-snd_nxt-lockless-reads.patch
+ patches.suse/tcp-annotate-tp-urg_seq-lockless-reads.patch
+ patches.suse/tcp-annotate-sk-sk_rcvbuf-lockless-reads.patch
+ patches.suse/tcp-annotate-sk-sk_sndbuf-lockless-reads.patch
+ patches.suse/tcp-annotate-sk-sk_wmem_queued-lockless-reads.patch
patches.suse/net-ibmvnic-Fix-EOI-when-running-in-XIVE-mode.patch
patches.suse/netdevsim-Fix-error-handling-in-nsim_fib_init-and-ns.patch
patches.suse/net-aquantia-temperature-retrieval-fix.patch
@@ -2656,8 +2681,10 @@
patches.suse/ath10k-fix-latency-issue-for-QCA988x.patch
patches.suse/net-bcmgenet-Set-phydev-dev_flags-only-for-internal-.patch
patches.suse/net-ethernet-broadcom-have-drivers-select-DIMLIB-as-.patch
+ patches.suse/net-cavium-Use-the-correct-style-for-SPDX-License-Id.patch
patches.suse/net-avoid-errors-when-trying-to-pop-MLPS-header-on-n.patch
patches.suse/net-sched-fix-corrupted-L2-header-with-MPLS-push-and.patch
+ patches.suse/tcp-fix-a-possible-lockdep-splat-in-tcp_done.patch
patches.suse/net-avoid-potential-infinite-loop-in-tc_ctl_action.patch
patches.suse/sched-etf-Fix-ordering-of-packets-with-same-txtime.patch
patches.suse/sctp-change-sctp_prot-.no_autobind-with-true.patch
@@ -2674,9 +2701,15 @@
patches.suse/net-phy-bcm7xxx-define-soft_reset-for-40nm-EPHY.patch
patches.suse/net-bcmgenet-soft-reset-40nm-EPHYs-before-MAC-init.patch
patches.suse/net-bcmgenet-reset-40nm-EPHY-on-energy-detect.patch
+ patches.suse/net-ensure-correct-skb-tstamp-in-various-fragmenters.patch
+ patches.suse/mlxsw-spectrum_trap-Push-Ethernet-header-before-repo.patch
patches.suse/net-usb-lan78xx-Connect-PHY-before-registering-MAC.patch
+ patches.suse/net-hns3-fix-mis-counting-IRQ-vector-numbers-issue.patch
patches.suse/xen-netback-fix-error-path-of-xenvif_connect_data.patch
+ patches.suse/net-netem-fix-error-path-for-corrupted-GSO-frames.patch
+ patches.suse/net-netem-correct-the-parent-s-backlog-when-corrupte.patch
patches.suse/net-dsa-fix-switch-tree-list.patch
+ patches.suse/net-reorder-struct-net-fields-to-avoid-false-sharing.patch
patches.suse/irqchip-sifive-plic-Switch-to-fasteoi-flow.patch
patches.suse/x86-boot-64-Make-level2_kernel_pgt-pages-invalid-out.patch
patches.suse/x86-boot-64-Round-memory-hole-size-up-to-next-PMD-pa.patch
@@ -2771,9 +2804,114 @@
patches.suse/Revert-ALSA-hda-Flush-interrupts-on-disabling.patch
patches.suse/ALSA-hda-Fix-mutex-deadlock-in-HDMI-codec-driver.patch
patches.suse/ALSA-timer-Fix-mutex-deadlock-at-releasing-card.patch
+ patches.suse/RDMA-mlx5-Clear-old-rate-limit-when-closing-QP.patch
+ patches.suse/IB-hfi1-Avoid-excessive-retry-for-TID-RDMA-READ-requ.patch
+ patches.suse/IB-hfi1-Use-a-common-pad-buffer-for-9B-and-16B-packe.patch
+ patches.suse/iw_cxgb4-fix-ECN-check-on-the-passive-accept.patch
+ patches.suse/RDMA-iwcm-move-iw_rem_ref-calls-out-of-spinlock.patch
+ patches.suse/RDMA-siw-free-siw_base_qp-in-kref-release-routine.patch
+ patches.suse/RDMA-qedr-Fix-reported-firmware-version.patch
+ patches.suse/IB-core-Use-rdma_read_gid_l2_fields-to-compare-GID-L.patch
+ patches.suse/RDMA-uverbs-Prevent-potential-underflow.patch
+ patches.suse/RDMA-nldev-Skip-counter-if-port-doesn-t-match.patch
+ patches.suse/IB-core-Avoid-deadlock-during-netlink-message-handli.patch
+ patches.suse/RDMA-mlx5-Use-irq-xarray-locking-for-mkey_table.patch
+ patches.suse/RDMA-iw_cxgb4-Avoid-freeing-skb-twice-in-arp-failure.patch
+ patches.suse/RDMA-hns-Prevent-memory-leaks-of-eq-buf_list.patch
patches.suse/efi-tpm-Return-EINVAL-when-determining-tpm-final-eve.patch
+ patches.suse/SUNRPC-The-TCP-back-channel-mustn-t-disappear-while-.patch
+ patches.suse/SUNRPC-The-RDMA-back-channel-mustn-t-disappear-while.patch
+ patches.suse/SUNRPC-Destroy-the-back-channel-when-we-destroy-the-.patch
+ patches.suse/net-mlx5e-Tx-Fix-assumption-of-single-WQEBB-of-NOP-i.patch
+ patches.suse/net-mlx5e-Tx-Zero-memset-WQE-info-struct-upon-update.patch
+ patches.suse/net-mlx5e-kTLS-Release-reference-on-DUMPed-fragments.patch
+ patches.suse/net-mlx5e-kTLS-Size-of-a-Dump-WQE-is-fixed.patch
+ patches.suse/net-mlx5e-kTLS-Save-only-the-frag-page-to-release-at.patch
+ patches.suse/net-mlx5e-kTLS-Save-by-value-copy-of-the-record-frag.patch
+ patches.suse/net-mlx5e-kTLS-Fix-page-refcnt-leak-in-TX-resync-err.patch
+ patches.suse/net-mlx5e-kTLS-Fix-missing-SQ-edge-fill.patch
+ patches.suse/net-mlx5e-kTLS-Limit-DUMP-wqe-size.patch
+ patches.suse/net-mlx5e-kTLS-Remove-unneeded-cipher-type-checks.patch
+ patches.suse/net-mlx5e-kTLS-Save-a-copy-of-the-crypto-info.patch
+ patches.suse/net-mlx5e-kTLS-Enhance-TX-resync-flow.patch
+ patches.suse/net-mlx5e-TX-Fix-consumer-index-of-error-cqe-dump.patch
+ patches.suse/net-mlx5-prevent-memory-leak-in-mlx5_fpga_conn_creat.patch
+ patches.suse/net-mlx5-fix-memory-leak-in-mlx5_fw_fatal_reporter_d.patch
+ patches.suse/ipv4-fix-IPSKB_FRAG_PMTU-handling-with-fragmentation.patch
+ patches.suse/bnxt_en-Fix-the-size-of-devlink-MSIX-parameters.patch
+ patches.suse/bnxt_en-Fix-devlink-NVRAM-related-byte-order-related.patch
+ patches.suse/bnxt_en-Adjust-the-time-to-wait-before-polling-firmw.patch
+ patches.suse/bnxt_en-Minor-formatting-changes-in-FW-devlink_healt.patch
+ patches.suse/bnxt_en-Avoid-disabling-pci-device-in-bnxt_remove_on.patch
+ patches.suse/ipv6-include-net-addrconf.h-for-missing-declarations.patch
+ patches.suse/net-flow_dissector-switch-to-siphash.patch
+ patches.suse/keys-Fix-memory-leak-in-copy_net_ns.patch
+ patches.suse/net-core-limit-nested-device-depth.patch
+ patches.suse/net-core-add-generic-lockdep-keys.patch
+ patches.suse/bonding-fix-unexpected-IFF_BONDING-bit-unset.patch
+ patches.suse/bonding-use-dynamic-lockdep-key-instead-of-subclass.patch
+ patches.suse/team-fix-nested-locking-lockdep-warning.patch
+ patches.suse/net-core-add-ignore-flag-to-netdev_adjacent-structur.patch
+ patches.suse/vxlan-add-adjacent-link-to-limit-depth-level.patch
+ patches.suse/net-remove-unnecessary-variables-and-callback.patch
+ patches.suse/net-sch_generic-Use-pfifo_fast-as-fallback-scheduler.patch
+ patches.suse/netns-fix-GFP-flags-in-rtnl_net_notifyid.patch
+ patches.suse/cxgb4-request-the-TX-CIDX-updates-to-status-page.patch
+ patches.suse/net-Zeroing-the-structure-ethtool_wolinfo-in-ethtool.patch
+ patches.suse/ipv4-fix-route-update-on-metric-change.patch
+ patches.suse/bpf-lwtunnel-Fix-reroute-supplying-invalid-dst.patch
+ patches.suse/xdp-Prevent-overflow-in-devmap_hash-cost-calculation.patch
+ patches.suse/xdp-Handle-device-unregister-for-devmap_hash-map-typ.patch
patches.suse/bpf-Fix-use-after-free-in-subprog-s-jited-symbol-rem.patch
patches.suse/bpf-Fix-use-after-free-in-bpf_get_prog_name.patch
+ patches.suse/xsk-Fix-registration-of-Rx-only-sockets.patch
+ patches.suse/netfilter-nf_flow_table-set-timeout-before-insertion.patch
+ patches.suse/netfilter-nft_payload-fix-missing-check-for-matching.patch
+ patches.suse/ipvs-don-t-ignore-errors-in-case-refcounting-ip_vs-m.patch
+ patches.suse/ipvs-move-old_secure_tcp-into-struct-netns_ipvs.patch
+ patches.suse/net-add-skb_queue_empty_lockless.patch
+ patches.suse/udp-use-skb_queue_empty_lockless.patch
+ patches.suse/net-use-skb_queue_empty_lockless-in-poll-handlers.patch
+ patches.suse/net-use-skb_queue_empty_lockless-in-busy-poll-contex.patch
+ patches.suse/net-add-READ_ONCE-annotation-in-__skb_wait_for_more_.patch
+ patches.suse/udp-fix-data-race-in-udp_set_dev_scratch.patch
+ patches.suse/net-fix-sk_page_frag-recursion-from-memory-reclaim.patch
+ patches.suse/net-hisilicon-Fix-Trying-to-free-already-free-IRQ.patch
+ patches.suse/net-mlx4_core-Dynamically-set-guaranteed-amount-of-c.patch
+ patches.suse/net-hisilicon-Fix-ping-latency-when-deal-with-high-t.patch
+ patches.suse/erspan-fix-the-tun_info-options_len-check-for-erspan.patch
+ patches.suse/vxlan-check-tun_info-options_len-properly.patch
+ patches.suse/bonding-fix-using-uninitialized-mode_lock.patch
+ patches.suse/net-rtnetlink-fix-a-typo-fbd-fdb.patch
+ patches.suse/net-mlx5e-Determine-source-port-properly-for-vlan-pu.patch
+ patches.suse/net-mlx5e-Remove-incorrect-match-criteria-assignment.patch
+ patches.suse/net-mlx5e-Replace-kfree-with-kvfree-when-free-vhca-s.patch
+ patches.suse/net-mlx5e-Only-skip-encap-flows-update-when-encap-in.patch
+ patches.suse/net-mlx5-Fix-rtable-reference-leak.patch
+ patches.suse/net-mlx5-Fix-NULL-pointer-dereference-in-extended-de.patch
+ patches.suse/net-mlx5e-Don-t-store-direct-pointer-to-action-s-tun.patch
+ patches.suse/net-mlx5e-Fix-handling-of-compressed-CQEs-in-case-of.patch
+ patches.suse/net-mlx5e-Fix-ethtool-self-test-link-speed.patch
+ patches.suse/net-mlx5e-Initialize-on-stack-link-modes-bitmap.patch
+ patches.suse/qed-fix-spelling-mistake-queuess-queues.patch
+ patches.suse/vxlan-fix-unexpected-failure-of-vxlan_changelink.patch
+ patches.suse/qed-Optimize-execution-time-for-nvm-attributes-confi.patch
+ patches.suse/mlxsw-core-Unpublish-devlink-parameters-during-reloa.patch
+ patches.suse/net-annotate-accesses-to-sk-sk_incoming_cpu.patch
+ patches.suse/net-annotate-lockless-accesses-to-sk-sk_napi_id.patch
+ patches.suse/cxgb4-fix-panic-when-attaching-to-ULD-fail.patch
+ patches.suse/netdevsim-Fix-use-after-free-during-device-dismantle.patch
+ patches.suse/tcp-increase-tcp_max_syn_backlog-max-value.patch
+ patches.suse/igb-igc-Don-t-warn-on-fatal-read-failures-when-the-d.patch
+ patches.suse/igb-Enable-media-autosense-for-the-i350.patch
+ patches.suse/igb-Fix-constant-media-auto-sense-switching-when-no-.patch
+ patches.suse/i40e-Fix-receive-buffer-starvation-for-AF_XDP.patch
+ patches.suse/e1000-fix-memory-leaks.patch
+ patches.suse/ixgbe-Remove-duplicate-clear_bit-call.patch
+ patches.suse/inet-stop-leaking-jiffies-on-the-wire.patch
+ patches.suse/net-phylink-Fix-phylink_dbg-macro.patch
+ patches.suse/net-cls_bpf-fix-NULL-deref-on-offload-filter-removal.patch
+ patches.suse/net-fix-installing-orphaned-programs.patch
patches.suse/powerpc-powernv-Fix-CPU-idle-to-be-called-with-IRQs-.patch
patches.suse/scsi-qla2xxx-fixup-incorrect-usage-of-host_byte.patch
patches.suse/scsi-lpfc-Check-queue-pointer-before-use.patch
diff --git a/supported.conf b/supported.conf
index d560e3bf6b..db0745909f 100644
--- a/supported.conf
+++ b/supported.conf
@@ -1016,7 +1016,7 @@
drivers/infiniband/hw/bnxt_re/bnxt_re
drivers/infiniband/hw/cxgb3/iw_cxgb3
drivers/infiniband/hw/cxgb4/iw_cxgb4
-- drivers/infiniband/hw/efa/efa
+ drivers/infiniband/hw/efa/efa
drivers/infiniband/hw/hfi1/hfi1
drivers/infiniband/hw/hns/hns-roce-hw-v1
drivers/infiniband/hw/hns/hns-roce-hw-v2
@@ -1031,7 +1031,7 @@
drivers/infiniband/hw/vmw_pvrdma/vmw_pvrdma
drivers/infiniband/sw/rdmavt/rdmavt
drivers/infiniband/sw/rxe/rdma_rxe
-- drivers/infiniband/sw/siw/siw
+ drivers/infiniband/sw/siw/siw
drivers/infiniband/ulp/ipoib/ib_ipoib
drivers/infiniband/ulp/iser/ib_iser
drivers/infiniband/ulp/isert/ib_isert