Home Home > GIT Browse > SLE12-SP4
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohannes Thumshirn <jthumshirn@suse.de>2019-04-16 09:56:48 +0200
committerJohannes Thumshirn <jthumshirn@suse.de>2019-04-16 09:56:48 +0200
commit10e910d5ed11c38685c90dbe161840d07a98c7d2 (patch)
tree8f114ac7eb9a1bde7d8f3861dce10860ef031ac4
parent1cfe0916dd583d4e893166cab6e71ced26b2a404 (diff)
parentdc8dc2062212ac4b805ede4db3e39b7e961b93e5 (diff)
Merge remote-tracking branch 'origin/SLE15' into SLE12-SP4SLE12-SP4
Conflicts: patches.kabi/kabi-cxgb4-MU.patch series.conf
-rw-r--r--blacklist.conf9
-rw-r--r--patches.arch/powerpc-64s-Fix-logic-when-handling-unknown-CPU-feat.patch83
-rw-r--r--patches.arch/powerpc-powernv-Make-opal-log-only-readable-by-root.patch41
-rw-r--r--patches.drivers/ALSA-hda-Add-two-more-machines-to-the-power_save_bla.patch48
-rw-r--r--patches.drivers/ALSA-seq-Fix-OOB-reads-from-strlcpy.patch55
-rw-r--r--patches.drivers/NFC-nci-Add-some-bounds-checking-in-nci_hci_cmd_rece.patch57
-rw-r--r--patches.drivers/ibmvnic-Enable-GRO.patch35
-rw-r--r--patches.drivers/ibmvnic-Fix-netdev-feature-clobbering-during-a-reset.patch90
-rw-r--r--patches.drivers/iommu-amd-set-exclusion-range-correctly35
-rw-r--r--patches.drivers/mmc-tmio_mmc_core-don-t-claim-spurious-interrupts.patch72
-rw-r--r--patches.drivers/staging-vt6655-Fix-interrupt-race-condition-on-devic.patch70
-rw-r--r--patches.drivers/staging-vt6655-Remove-vif-check-from-vnt_interrupt.patch39
-rw-r--r--patches.drivers/vfio-type1-limit-dma-mappings-per-container94
-rw-r--r--patches.fixes/0001-Collect-descriptors-of-all-ULD-and-LLD-hardware-queu.patch503
-rw-r--r--patches.fixes/0001-cxgb4-Add-capability-to-get-set-SGE-Doorbell-Queue-T.patch268
-rw-r--r--patches.fixes/0001-cxgb4-Add-new-T5-PCI-device-id-0x50ae.patch28
-rw-r--r--patches.fixes/0001-cxgb4-Add-support-for-FW_ETH_TX_PKT_VM_WR.patch483
-rw-r--r--patches.fixes/0001-cxgb4-Added-missing-break-in-ndo_udp_tunnel_-add-del.patch42
-rw-r--r--patches.fixes/0001-cxgb4-Support-ethtool-private-flags.patch109
-rw-r--r--patches.fixes/0001-cxgb4-cxgb4vf-Add-support-for-SGE-doorbell-queue-tim.patch874
-rw-r--r--patches.fixes/0001-cxgb4-cxgb4vf-Link-management-changes.patch229
-rw-r--r--patches.fixes/0001-cxgb4-do-not-return-DUPLEX_UNKNOWN-when-link-is-down.patch48
-rw-r--r--patches.fixes/0001-futex-Ensure-that-futex-address-is-aligned-in-handle.patch51
-rw-r--r--patches.fixes/0009-xfs-rewrite-getbmap-using-the-xfs_iext_-helpers.patch15
-rw-r--r--patches.fixes/KEYS-always-initialize-keyring_index_key-desc_len.patch104
-rw-r--r--patches.fixes/KEYS-user-Align-the-payload-buffer.patch47
-rw-r--r--patches.fixes/crypto-pcbc-remove-bogus-memcpy-s-with-src-dest.patch92
-rw-r--r--patches.fixes/fs-avoid-fdput-after-failed-fdget-in-vfs_dedupe_file.patch49
-rw-r--r--patches.fixes/ring-buffer-Check-if-memory-is-available-before-allo.patch72
-rw-r--r--patches.fixes/tracing-hrtimer-Fix-tracing-bugs-by-taking-all-clock.patch73
-rw-r--r--patches.fixes/vfs-Avoid-softlockups-in-drop_pagecache_sb.patch3
-rw-r--r--patches.fixes/vfs-exit-early-from-zero-length-remap-operations.patch32
-rw-r--r--patches.fixes/vfs-limit-size-of-dedupe.patch33
-rw-r--r--patches.fixes/xfs-allow-xfs_lock_two_inodes-to-take-different-EXCL.patch193
-rw-r--r--patches.fixes/xfs-call-xfs_qm_dqattach-before-performing-reflink-o.patch36
-rw-r--r--patches.fixes/xfs-cap-the-length-of-deduplication-requests.patch50
-rw-r--r--patches.fixes/xfs-clean-up-xfs_reflink_remap_blocks-call-site.patch109
-rw-r--r--patches.fixes/xfs-fix-data-corruption-w-unaligned-dedupe-ranges.patch70
-rw-r--r--patches.fixes/xfs-fix-data-corruption-w-unaligned-reflink-ranges.patch109
-rw-r--r--patches.fixes/xfs-fix-pagecache-truncation-prior-to-reflink.patch40
-rw-r--r--patches.fixes/xfs-flush-removing-page-cache-in-xfs_reflink_remap_p.patch94
-rw-r--r--patches.fixes/xfs-only-grab-shared-inode-locks-for-source-file-dur.patch122
-rw-r--r--patches.fixes/xfs-refactor-clonerange-preparation-into-a-separate-.patch192
-rw-r--r--patches.fixes/xfs-reflink-find-shared-should-take-a-transaction.patch121
-rw-r--r--patches.fixes/xfs-reflink-should-break-pnfs-leases-before-sharing-.patch91
-rw-r--r--patches.fixes/xfs-remove-dest-file-s-post-eof-preallocations-befor.patch54
-rw-r--r--patches.fixes/xfs-update-ctime-and-remove-suid-before-cloning-file.patch66
-rw-r--r--patches.fixes/xfs-zero-posteof-blocks-when-cloning-above-eof.patch78
-rw-r--r--patches.kabi/kabi-restore-icmp_send.patch55
-rw-r--r--patches.suse/0001-x86-tsc-Force-inlining-of-cyc2ns-bits.patch61
-rw-r--r--patches.suse/bnxt_en-Drop-oversize-TX-packets-to-prevent-errors.patch43
-rw-r--r--patches.suse/do-not-default-to-ibrs-on-skl.patch54
-rw-r--r--patches.suse/dsa-mv88e6xxx-Ensure-all-pending-interrupts-are-hand.patch89
-rw-r--r--patches.suse/hv_netvsc-Fix-IP-header-checksum-for-coalesced-packe.patch57
-rw-r--r--patches.suse/ipv4-Return-error-for-RTA_VIA-attribute.patch43
-rw-r--r--patches.suse/ipv6-Return-error-for-RTA_VIA-attribute.patch43
-rw-r--r--patches.suse/ipv6-propagate-genlmsg_reply-return-code.patch35
-rw-r--r--patches.suse/ipvlan-disallow-userns-cap_net_admin-to-change-globa.patch128
-rw-r--r--patches.suse/kcm-switch-order-of-device-registration-to-fix-a-cra.patch88
-rw-r--r--patches.suse/missing-barriers-in-some-of-unix_sock-addr-and-path-.patch231
-rw-r--r--patches.suse/mpls-Return-error-for-RTA_GATEWAY-attribute.patch32
-rw-r--r--patches.suse/net-Add-__icmp_send-helper.patch73
-rw-r--r--patches.suse/net-Add-header-for-usage-of-fls64.patch25
-rw-r--r--patches.suse/net-Do-not-allocate-page-fragments-that-are-not-skb-.patch43
-rw-r--r--patches.suse/net-Fix-for_each_netdev_feature-on-Big-endian.patch86
-rw-r--r--patches.suse/net-Set-rtm_table-to-RT_TABLE_COMPAT-for-ipv6-for-ta.patch30
-rw-r--r--patches.suse/net-avoid-false-positives-in-untrusted-gso-validatio.patch51
-rw-r--r--patches.suse/net-avoid-use-IPCB-in-cipso_v4_error.patch101
-rw-r--r--patches.suse/net-dsa-mv88e6xxx-Fix-u64-statistics.patch31
-rw-r--r--patches.suse/net-fix-IPv6-prefix-route-residue.patch44
-rw-r--r--patches.suse/net-hsr-fix-memory-leak-in-hsr_dev_finalize.patch109
-rw-r--r--patches.suse/net-hsr-fix-possible-crash-in-add_timer.patch133
-rw-r--r--patches.suse/net-mlx5e-Don-t-overwrite-pedit-action-when-multiple.patch97
-rw-r--r--patches.suse/net-nfc-Fix-NULL-dereference-on-nfc_llcp_build_tlv-f.patch154
-rw-r--r--patches.suse/net-packet-Set-__GFP_NOWARN-upon-allocation-in-alloc.patch79
-rw-r--r--patches.suse/net-packet-fix-4gb-buffer-limit-due-to-overflow-chec.patch37
-rw-r--r--patches.suse/net-sit-fix-UBSAN-Undefined-behaviour-in-check_6rd.patch74
-rw-r--r--patches.suse/net-sit-fix-memory-leak-in-sit_init_net.patch51
-rw-r--r--patches.suse/net-socket-set-sock-sk-to-NULL-after-calling-proto_o.patch83
-rw-r--r--patches.suse/net-validate-untrusted-gso-packets-without-csum-offl.patch62
-rw-r--r--patches.suse/net-x25-fix-a-race-in-x25_bind.patch136
-rw-r--r--patches.suse/net-x25-fix-use-after-free-in-x25_device_event.patch148
-rw-r--r--patches.suse/net-x25-reset-state-in-x25_connect.patch84
-rw-r--r--patches.suse/net_sched-fix-two-more-memory-leaks-in-cls_tcindex.patch89
-rw-r--r--patches.suse/netlabel-fix-out-of-bounds-memory-accesses.patch51
-rw-r--r--patches.suse/qmi_wwan-Add-support-for-Quectel-EG12-EM12.patch83
-rw-r--r--patches.suse/ravb-Decrease-TxFIFO-depth-of-Q3-and-Q2-to-one.patch46
-rw-r--r--patches.suse/route-set-the-deleted-fnhe-fnhe_daddr-to-0-in-ip_del.patch59
-rw-r--r--patches.suse/rxrpc-Fix-client-call-queueing-waiting-for-channel.patch47
-rw-r--r--patches.suse/sctp-call-gso_reset_checksum-when-computing-checksum.patch73
-rw-r--r--patches.suse/sit-check-if-IPv6-enabled-before-calling-ip6_err_gen.patch50
-rw-r--r--patches.suse/tcp-tcp_v4_err-should-be-more-careful.patch44
-rw-r--r--patches.suse/tipc-fix-race-condition-causing-hung-sendto.patch83
-rw-r--r--patches.suse/tun-fix-blocking-read.patch40
-rw-r--r--patches.suse/tun-remove-unnecessary-memory-barrier.patch29
-rw-r--r--patches.suse/vxlan-test-dev-flags-IFF_UP-before-calling-netif_rx.patch80
-rw-r--r--patches.suse/xen-netback-don-t-populate-the-hash-cache-on-XenBus-.patch53
-rw-r--r--patches.suse/xen-netback-fix-occasional-leak-of-grant-ref-mapping.patch63
-rw-r--r--series.conf102
-rw-r--r--supported.conf2
100 files changed, 8979 insertions, 13 deletions
diff --git a/blacklist.conf b/blacklist.conf
index 4cfe0400c5..38f2ccf4d2 100644
--- a/blacklist.conf
+++ b/blacklist.conf
@@ -486,12 +486,15 @@ a81d1ab3cad77e20c2df8baef0a35a4980fc511c # nfc: revert: not applicable
ba552399954dde1b388f7749fecad5c349216981 # printk: fixes 719f6a7040f1bdaf96fcc that is needed only when printing trace buffer when panicing in NMI (bsc#1112173)
a338f84dc196f44b63ba0863d2f34fd9b1613572 # printk: fixes 719f6a7040f1bdaf96fcc that is needed only when printing trace buffer when panicing in NMI (bsc#1112173)
03fc7f9c99c1e7ae2925d459e8487f1a6f199f79 # printk: fixes 719f6a7040f1bdaf96fcc that is needed only when printing trace buffer when panicing in NMI (bsc#1112173)
+c3fee60908db4a8594f2e4a2131998384b8fa006 # printk: cosmetic; anyway, it fixes a commit that we do not have in SLE15
+b60706644282af04e4aa57da5af57470d453cd1f # vsprintf: cosmetic
741a76b350897604c48fb12beff1c9b77724dc96 # kthread: fixes rather rare races in CPU hotplug; there are several followup fixes on top of it to get it actually right; does not worth the risk
4950276672fce5c241857540f8561c440663673d # kmemcheck removal; not for released products
d8be75663cec0069b85f80191abd2682ce4a512f # related to kmemcheck removal; not for released products
1f2cac107c591c24b60b115d6050adc213d10fc0 # blktrace: racy init/start-stop/teardown for ages; not worth it
a6da0024ffc19e0d47712bb5ca4fd083f76b07df # blktrace: fix unlocked registration of tracepoints; racy for ages; found by syzcaller; not worth it
6b7e633fe9c24682df550e5311f47fb524701586 # ring_buffer: just an optimization
+23721a755f98ac846897a013c92cccb281c1bcc8 # trace/xdp: compilation warning; we do not have the affected code
7685ab6c58557c6234f3540260195ecbee7fc4b3 # tracing: we do not have support for recording tgid of tasks (bsc#1112221)
90e406f96f630c07d631a021fd4af10aac913e77 # tracing: just a memory optimization
68e76e034b6b1c1ce2eece1ab8ae4008e14be470 # tracing: dependency fix found by randconfig; prevents a huge build
@@ -499,6 +502,11 @@ a6da0024ffc19e0d47712bb5ca4fd083f76b07df # blktrace: fix unlocked registration o
73c8d8945505acdcbae137c2e00a1232e0be709f # tracing: just a small annoyance when switching buffer snapshot
57ea2a34adf40f3a6e88409aafcf803b8945619a # tracing/kprobes: old bug; hard to trigger; rather just annoying
f143641bfef9a4a60c57af30de26c63057e7e695 # tracing: old and not serious bug
+a15f7fc20389a8827d5859907568b201234d4b79 # tracing: nothing serious
+26b68dd2f48fe7699a89f0cfbb9f4a650dc1c837 # tracing: cosmetic
+f8494fa3dd10b52eab47a9666a8bc34719a129aa # tracing: affected code is not in our kernel
+2519c1bbe38d7acacc9aacba303ca6f97482ed53 # tracing: affected code is not in our kernel
+9e7382153f80ba45a0bbcd540fb77d4b15f6e966 # tracing: affected code is not in our kernel
8114865ff82e200b383e46821c25cb0625b842b5 # ftrace: fixes a race when calculating timings in graph_tracer when options/graph-time is set to zero; non-trivial change; let's ignore until anyone complains
07f7175b43827640d1e69c9eded89aa089a234b4 # ftrace: see above 8114865ff82e200b383
f1f5b14afd7cce39e6a9b25c685e1ea34c231096 # ftrace: see above 8114865ff82e200b383
@@ -1053,3 +1061,4 @@ dfa88658fb0583abb92e062c7a9cd5a5b94f2a46 # powerpc/fsl: Update Spectre v2 report
0bbea75c476b77fa7d7811d6be911cc7583e640f # powerpc/traps: fix recoverability of machine check handling on book3s/32
179ab1cbf883575c3a585bcfc0f2160f1d22a149 # powerpc/64: Add CONFIG_PPC_BARRIER_NOSPEC - too invasive, we build this anyway
a89e7bcb18081c611eb6cf50edd440fa4983a71a # too invasive, would break kABI in the prerequisites
+47b16820c490149c2923e8474048f2c6e7557cab # Xilinx SystemACE is unsupported
diff --git a/patches.arch/powerpc-64s-Fix-logic-when-handling-unknown-CPU-feat.patch b/patches.arch/powerpc-64s-Fix-logic-when-handling-unknown-CPU-feat.patch
new file mode 100644
index 0000000000..e2745eb029
--- /dev/null
+++ b/patches.arch/powerpc-64s-Fix-logic-when-handling-unknown-CPU-feat.patch
@@ -0,0 +1,83 @@
+From 8cfaf106918a8c13abb24c641556172afbb9545c Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Mon, 11 Feb 2019 11:20:01 +1100
+Subject: [PATCH] powerpc/64s: Fix logic when handling unknown CPU features
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+References: bsc#1055117
+Patch-mainline: v5.1-rc1
+Git-commit: 8cfaf106918a8c13abb24c641556172afbb9545c
+
+In cpufeatures_process_feature(), if a provided CPU feature is unknown and
+enable_unknown is false, we erroneously print that the feature is being
+enabled and return true, even though no feature has been enabled, and
+may also set feature bits based on the last entry in the match table.
+
+Fix this so that we only set feature bits from the match table if we have
+actually enabled a feature from that table, and when failing to enable an
+unknown feature, always print the "not enabling" message and return false.
+
+Coincidentally, some older gccs (<GCC 7), when invoked with
+-fsanitize-coverage=trace-pc, cause a spurious uninitialised variable
+warning in this function:
+
+ arch/powerpc/kernel/dt_cpu_ftrs.c: In function ‘cpufeatures_process_feature’:
+ arch/powerpc/kernel/dt_cpu_ftrs.c:686:7: warning: ‘m’ may be used uninitialized in this function [-Wmaybe-uninitialized]
+ if (m->cpu_ftr_bit_mask)
+
+An upcoming patch will enable support for kcov, which requires this option.
+This patch avoids the warning.
+
+Fixes: 5a61ef74f269 ("powerpc/64s: Support new device tree binding for discovering CPU features")
+Reported-by: Segher Boessenkool <segher@kernel.crashing.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+[ajd: add commit message]
+Signed-off-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/kernel/dt_cpu_ftrs.c | 17 +++++++----------
+ 1 file changed, 7 insertions(+), 10 deletions(-)
+
+diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
+index 8be3721d9302..e49bd5efcfe6 100644
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -666,8 +666,10 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+ m = &dt_cpu_feature_match_table[i];
+ if (!strcmp(f->name, m->name)) {
+ known = true;
+- if (m->enable(f))
++ if (m->enable(f)) {
++ cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
+ break;
++ }
+
+ pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
+ f->name);
+@@ -675,17 +677,12 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+ }
+ }
+
+- if (!known && enable_unknown) {
+- if (!feat_try_enable_unknown(f)) {
+- pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
+- f->name);
+- return false;
+- }
++ if (!known && (!enable_unknown || !feat_try_enable_unknown(f))) {
++ pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
++ f->name);
++ return false;
+ }
+
+- if (m->cpu_ftr_bit_mask)
+- cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
+-
+ if (known)
+ pr_debug("enabling: %s\n", f->name);
+ else
+--
+2.20.1
+
diff --git a/patches.arch/powerpc-powernv-Make-opal-log-only-readable-by-root.patch b/patches.arch/powerpc-powernv-Make-opal-log-only-readable-by-root.patch
new file mode 100644
index 0000000000..a1a30b58c5
--- /dev/null
+++ b/patches.arch/powerpc-powernv-Make-opal-log-only-readable-by-root.patch
@@ -0,0 +1,41 @@
+From 7b62f9bd2246b7d3d086e571397c14ba52645ef1 Mon Sep 17 00:00:00 2001
+From: Jordan Niethe <jniethe5@gmail.com>
+Date: Wed, 27 Feb 2019 14:02:29 +1100
+Subject: [PATCH] powerpc/powernv: Make opal log only readable by root
+
+References: bsc#1065729
+Patch-mainline: v5.1-rc1
+Git-commit: 7b62f9bd2246b7d3d086e571397c14ba52645ef1
+
+Currently the opal log is globally readable. It is kernel policy to
+limit the visibility of physical addresses / kernel pointers to root.
+Given this and the fact the opal log may contain this information it
+would be better to limit the readability to root.
+
+Fixes: bfc36894a48b ("powerpc/powernv: Add OPAL message log interface")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
+Reviewed-by: Stewart Smith <stewart@linux.ibm.com>
+Reviewed-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/platforms/powernv/opal-msglog.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
+index acd3206dfae3..06628c71cef6 100644
+--- a/arch/powerpc/platforms/powernv/opal-msglog.c
++++ b/arch/powerpc/platforms/powernv/opal-msglog.c
+@@ -98,7 +98,7 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
+ }
+
+ static struct bin_attribute opal_msglog_attr = {
+- .attr = {.name = "msglog", .mode = 0444},
++ .attr = {.name = "msglog", .mode = 0400},
+ .read = opal_msglog_read
+ };
+
+--
+2.20.1
+
diff --git a/patches.drivers/ALSA-hda-Add-two-more-machines-to-the-power_save_bla.patch b/patches.drivers/ALSA-hda-Add-two-more-machines-to-the-power_save_bla.patch
new file mode 100644
index 0000000000..acf13b6946
--- /dev/null
+++ b/patches.drivers/ALSA-hda-Add-two-more-machines-to-the-power_save_bla.patch
@@ -0,0 +1,48 @@
+From cae30527901d9590db0e12ace994c1d58bea87fd Mon Sep 17 00:00:00 2001
+From: Hui Wang <hui.wang@canonical.com>
+Date: Mon, 8 Apr 2019 15:58:11 +0800
+Subject: [PATCH] ALSA: hda - Add two more machines to the power_save_blacklist
+Git-commit: cae30527901d9590db0e12ace994c1d58bea87fd
+Patch-mainline: v5.1-rc5
+References: bsc#1051510
+
+Recently we set CONFIG_SND_HDA_POWER_SAVE_DEFAULT to 1 when
+configuring the kernel, then two machines were reported to have noise
+after installing the new kernel. Put them in the blacklist, the
+noise disappears.
+
+https://bugs.launchpad.net/bugs/1821663
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/pci/hda/hda_intel.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index ece256a3b48f..2ec91085fa3e 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2142,6 +2142,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
+ SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
+ SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
++ /* https://bugs.launchpad.net/bugs/1821663 */
++ SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
+ SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+@@ -2150,6 +2152,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
+ SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+ SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
++ /* https://bugs.launchpad.net/bugs/1821663 */
++ SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
+ {}
+ };
+ #endif /* CONFIG_PM */
+--
+2.16.4
+
diff --git a/patches.drivers/ALSA-seq-Fix-OOB-reads-from-strlcpy.patch b/patches.drivers/ALSA-seq-Fix-OOB-reads-from-strlcpy.patch
new file mode 100644
index 0000000000..d9c1789ed2
--- /dev/null
+++ b/patches.drivers/ALSA-seq-Fix-OOB-reads-from-strlcpy.patch
@@ -0,0 +1,55 @@
+From 212ac181c158c09038c474ba68068be49caecebb Mon Sep 17 00:00:00 2001
+From: Zubin Mithra <zsm@chromium.org>
+Date: Thu, 4 Apr 2019 14:33:55 -0700
+Subject: [PATCH] ALSA: seq: Fix OOB-reads from strlcpy
+Git-commit: 212ac181c158c09038c474ba68068be49caecebb
+Patch-mainline: v5.1-rc5
+References: bsc#1051510
+
+When ioctl calls are made with non-null-terminated userspace strings,
+strlcpy causes an OOB-read from within strlen. Fix by changing to use
+strscpy instead.
+
+Signed-off-by: Zubin Mithra <zsm@chromium.org>
+Reviewed-by: Guenter Roeck <groeck@chromium.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/core/seq/seq_clientmgr.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 7d4640d1fe9f..38e7deab6384 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
+
+ /* fill the info fields */
+ if (client_info->name[0])
+- strlcpy(client->name, client_info->name, sizeof(client->name));
++ strscpy(client->name, client_info->name, sizeof(client->name));
+
+ client->filter = client_info->filter;
+ client->event_lost = client_info->event_lost;
+@@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+ /* set queue name */
+ if (!info->name[0])
+ snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
+- strlcpy(q->name, info->name, sizeof(q->name));
++ strscpy(q->name, info->name, sizeof(q->name));
+ snd_use_lock_free(&q->use_lock);
+
+ return 0;
+@@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
+ queuefree(q);
+ return -EPERM;
+ }
+- strlcpy(q->name, info->name, sizeof(q->name));
++ strscpy(q->name, info->name, sizeof(q->name));
+ queuefree(q);
+
+ return 0;
+--
+2.16.4
+
diff --git a/patches.drivers/NFC-nci-Add-some-bounds-checking-in-nci_hci_cmd_rece.patch b/patches.drivers/NFC-nci-Add-some-bounds-checking-in-nci_hci_cmd_rece.patch
new file mode 100644
index 0000000000..5635f986fa
--- /dev/null
+++ b/patches.drivers/NFC-nci-Add-some-bounds-checking-in-nci_hci_cmd_rece.patch
@@ -0,0 +1,57 @@
+From d7ee81ad09f072eab1681877fc71ec05f9c1ae92 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 3 Apr 2019 10:12:48 +0300
+Subject: [PATCH] NFC: nci: Add some bounds checking in nci_hci_cmd_received()
+Git-commit: d7ee81ad09f072eab1681877fc71ec05f9c1ae92
+Patch-mainline: v5.1-rc5
+References: bsc#1051510
+
+This is similar to commit 674d9de02aa7 ("NFC: Fix possible memory
+corruption when handling SHDLC I-Frame commands").
+
+I'm not totally sure, but I think that commit description may have
+overstated the danger. I was under the impression that this data came
+from the firmware? If you can't trust your networking firmware, then
+you're already in trouble.
+
+Anyway, these days we add bounds checking where ever we can and we call
+it kernel hardening. Better safe than sorry.
+
+Fixes: 11f54f228643 ("NFC: nci: Add HCI over NCI protocol support")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ net/nfc/nci/hci.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
+index ddfc52ac1f9b..c0d323b58e73 100644
+--- a/net/nfc/nci/hci.c
++++ b/net/nfc/nci/hci.c
+@@ -312,6 +312,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
+ create_info = (struct nci_hci_create_pipe_resp *)skb->data;
+ dest_gate = create_info->dest_gate;
+ new_pipe = create_info->pipe;
++ if (new_pipe >= NCI_HCI_MAX_PIPES) {
++ status = NCI_HCI_ANY_E_NOK;
++ goto exit;
++ }
+
+ /* Save the new created pipe and bind with local gate,
+ * the description for skb->data[3] is destination gate id
+@@ -336,6 +340,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
+ goto exit;
+ }
+ delete_info = (struct nci_hci_delete_pipe_noti *)skb->data;
++ if (delete_info->pipe >= NCI_HCI_MAX_PIPES) {
++ status = NCI_HCI_ANY_E_NOK;
++ goto exit;
++ }
+
+ ndev->hci_dev->pipes[delete_info->pipe].gate =
+ NCI_HCI_INVALID_GATE;
+--
+2.16.4
+
diff --git a/patches.drivers/ibmvnic-Enable-GRO.patch b/patches.drivers/ibmvnic-Enable-GRO.patch
new file mode 100644
index 0000000000..c760f85d21
--- /dev/null
+++ b/patches.drivers/ibmvnic-Enable-GRO.patch
@@ -0,0 +1,35 @@
+From b66b7bd2bdc1a74c46a0a470f9ac19629320d212 Mon Sep 17 00:00:00 2001
+From: Thomas Falcon <tlfalcon@linux.ibm.com>
+Date: Wed, 10 Apr 2019 11:06:59 -0500
+Subject: [PATCH] ibmvnic: Enable GRO
+
+References: bsc#1132227
+Patch-mainline: queued
+Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+Git-commit: b66b7bd2bdc1a74c46a0a470f9ac19629320d212
+
+Enable Generic Receive Offload in the ibmvnic driver.
+
+Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 51cfe95f3e24..cc22c5351513 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -3837,7 +3837,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
+ adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
+ adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
+
+- adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
++ adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+ if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
+ adapter->netdev->features |= NETIF_F_IP_CSUM;
+--
+2.20.1
+
diff --git a/patches.drivers/ibmvnic-Fix-netdev-feature-clobbering-during-a-reset.patch b/patches.drivers/ibmvnic-Fix-netdev-feature-clobbering-during-a-reset.patch
new file mode 100644
index 0000000000..6262c9c6af
--- /dev/null
+++ b/patches.drivers/ibmvnic-Fix-netdev-feature-clobbering-during-a-reset.patch
@@ -0,0 +1,90 @@
+From dde746a35f8b7da4b9515dd3dc4708a9926fbd65 Mon Sep 17 00:00:00 2001
+From: Thomas Falcon <tlfalcon@linux.ibm.com>
+Date: Wed, 10 Apr 2019 11:07:00 -0500
+Subject: [PATCH] ibmvnic: Fix netdev feature clobbering during a reset
+
+References: bsc#1132227
+Patch-mainline: queued
+Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+Git-commit: dde746a35f8b7da4b9515dd3dc4708a9926fbd65
+
+While determining offload capabilities of backing hardware during
+a device reset, the driver is clobbering current feature settings.
+Update hw_features on reset instead of features unless a feature
+is enabled that is no longer supported on the current backing device.
+Also enable features that were not supported prior to the reset but
+were previously enabled or requested by the user.
+
+This can occur if the reset is the result of a carrier change, such
+as a device failover or partition migration.
+
+Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 30 ++++++++++++++++++++++++------
+ 1 file changed, 24 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index cc22c5351513..3dfb2d131eb7 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -3762,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
+ {
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
++ netdev_features_t old_hw_features = 0;
+ union ibmvnic_crq crq;
+ int i;
+
+@@ -3837,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
+ adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
+ adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
+
++ if (adapter->state != VNIC_PROBING) {
++ old_hw_features = adapter->netdev->hw_features;
++ adapter->netdev->hw_features = 0;
++ }
++
+ adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+ if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
+- adapter->netdev->features |= NETIF_F_IP_CSUM;
++ adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
+
+ if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
+- adapter->netdev->features |= NETIF_F_IPV6_CSUM;
++ adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
+
+ if ((adapter->netdev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+- adapter->netdev->features |= NETIF_F_RXCSUM;
++ adapter->netdev->hw_features |= NETIF_F_RXCSUM;
+
+ if (buf->large_tx_ipv4)
+- adapter->netdev->features |= NETIF_F_TSO;
++ adapter->netdev->hw_features |= NETIF_F_TSO;
+ if (buf->large_tx_ipv6)
+- adapter->netdev->features |= NETIF_F_TSO6;
++ adapter->netdev->hw_features |= NETIF_F_TSO6;
+
+- adapter->netdev->hw_features |= adapter->netdev->features;
++ if (adapter->state == VNIC_PROBING) {
++ adapter->netdev->features |= adapter->netdev->hw_features;
++ } else if (old_hw_features != adapter->netdev->hw_features) {
++ netdev_features_t tmp = 0;
++
++ /* disable features no longer supported */
++ adapter->netdev->features &= adapter->netdev->hw_features;
++ /* turn on features now supported if previously enabled */
++ tmp = (old_hw_features ^ adapter->netdev->hw_features) &
++ adapter->netdev->hw_features;
++ adapter->netdev->features |=
++ tmp & adapter->netdev->wanted_features;
++ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
+--
+2.20.1
+
diff --git a/patches.drivers/iommu-amd-set-exclusion-range-correctly b/patches.drivers/iommu-amd-set-exclusion-range-correctly
new file mode 100644
index 0000000000..0044bbb6d3
--- /dev/null
+++ b/patches.drivers/iommu-amd-set-exclusion-range-correctly
@@ -0,0 +1,35 @@
+From: Joerg Roedel <jroedel@suse.de>
+Date: Fri, 12 Apr 2019 12:50:31 +0200
+Subject: iommu/amd: Set exclusion range correctly
+Git-commit: 3c677d206210f53a4be972211066c0f1cd47fe12
+Patch-mainline: v5.1-rc5
+References: bsc#1130425
+
+The exlcusion range limit register needs to contain the
+base-address of the last page that is part of the range, as
+bits 0-11 of this register are treated as 0xfff by the
+hardware for comparisons.
+
+So correctly set the exclusion range in the hardware to the
+last page which is _in_ the range.
+
+Fixes: b2026aa2dce44 ('x86, AMD IOMMU: add functions for programming IOMMU MMIO space')
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/amd_iommu_init.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 1b1378619fc9..ff40ba758cf3 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -359,7 +359,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
+ static void iommu_set_exclusion_range(struct amd_iommu *iommu)
+ {
+ u64 start = iommu->exclusion_start & PAGE_MASK;
+- u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
++ u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
+ u64 entry;
+
+ if (!iommu->exclusion_start)
+
diff --git a/patches.drivers/mmc-tmio_mmc_core-don-t-claim-spurious-interrupts.patch b/patches.drivers/mmc-tmio_mmc_core-don-t-claim-spurious-interrupts.patch
new file mode 100644
index 0000000000..e3dff5b934
--- /dev/null
+++ b/patches.drivers/mmc-tmio_mmc_core-don-t-claim-spurious-interrupts.patch
@@ -0,0 +1,72 @@
+From 5c27ff5db1491a947264d6d4e4cbe43ae6535bae Mon Sep 17 00:00:00 2001
+From: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Date: Mon, 18 Feb 2019 20:45:40 +0300
+Subject: [PATCH] mmc: tmio_mmc_core: don't claim spurious interrupts
+Git-commit: 5c27ff5db1491a947264d6d4e4cbe43ae6535bae
+Patch-mainline: v5.0
+References: bsc#1051510
+
+I have encountered an interrupt storm during the eMMC chip probing (and
+the chip finally didn't get detected). It turned out that U-Boot left
+the DMAC interrupts enabled while the Linux driver didn't use those.
+The SDHI driver's interrupt handler somehow assumes that, even if an
+SDIO interrupt didn't happen, it should return IRQ_HANDLED. I think
+that if none of the enabled interrupts happened and got handled, we
+should return IRQ_NONE -- that way the kernel IRQ code recognizes
+a spurious interrupt and masks it off pretty quickly...
+
+Fixes: 7729c7a232a9 ("mmc: tmio: Provide separate interrupt handlers")
+Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Reviewed-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Tested-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Reviewed-by: Simon Horman <horms+renesas@verge.net.au>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/mmc/host/tmio_mmc_pio.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/mmc/host/tmio_mmc_pio.c
++++ b/drivers/mmc/host/tmio_mmc_pio.c
+@@ -720,7 +720,7 @@ static bool __tmio_mmc_sdcard_irq(struct
+ return false;
+ }
+
+-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
++static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+ {
+ struct mmc_host *mmc = host->mmc;
+ struct tmio_mmc_data *pdata = host->pdata;
+@@ -728,7 +728,7 @@ static void __tmio_mmc_sdio_irq(struct t
+ unsigned int sdio_status;
+
+ if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
+- return;
++ return false;
+
+ status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
+ ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
+@@ -741,6 +741,8 @@ static void __tmio_mmc_sdio_irq(struct t
+
+ if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
+ mmc_signal_sdio_irq(mmc);
++
++ return ireg;
+ }
+
+ irqreturn_t tmio_mmc_irq(int irq, void *devid)
+@@ -762,9 +764,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *
+ if (__tmio_mmc_sdcard_irq(host, ireg, status))
+ return IRQ_HANDLED;
+
+- __tmio_mmc_sdio_irq(host);
++ if (__tmio_mmc_sdio_irq(host))
++ return IRQ_HANDLED;
+
+- return IRQ_HANDLED;
++ return IRQ_NONE;
+ }
+ EXPORT_SYMBOL(tmio_mmc_irq);
+
diff --git a/patches.drivers/staging-vt6655-Fix-interrupt-race-condition-on-devic.patch b/patches.drivers/staging-vt6655-Fix-interrupt-race-condition-on-devic.patch
new file mode 100644
index 0000000000..4a33e22452
--- /dev/null
+++ b/patches.drivers/staging-vt6655-Fix-interrupt-race-condition-on-devic.patch
@@ -0,0 +1,70 @@
+From 3b9c2f2e0e99bb67c96abcb659b3465efe3bee1f Mon Sep 17 00:00:00 2001
+From: Malcolm Priestley <tvboxspy@gmail.com>
+Date: Sun, 24 Mar 2019 18:53:49 +0000
+Subject: [PATCH] staging: vt6655: Fix interrupt race condition on device start up.
+Git-commit: 3b9c2f2e0e99bb67c96abcb659b3465efe3bee1f
+Patch-mainline: v5.1-rc3
+References: bsc#1051510
+
+It appears on some slower systems that the driver can find its way
+out of the workqueue while the interrupt is disabled by continuous polling
+by it.
+
+Move MACvIntEnable to vnt_interrupt_work so that it is always enabled
+on all routes out of vnt_interrupt_process.
+
+Move MACvIntDisable so that the device doesn't keep polling the system
+while the workqueue is being processed.
+
+Signed-off-by: Malcolm Priestley <tvboxspy@gmail.com>
+Cc: stable@vger.kernel.org # v4.2+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/staging/vt6655/device_main.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
+index b370985b58a1..83f1a1cf9182 100644
+--- a/drivers/staging/vt6655/device_main.c
++++ b/drivers/staging/vt6655/device_main.c
+@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
+ return;
+ }
+
+- MACvIntDisable(priv->PortOffset);
+-
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Read low level stats */
+@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+-
+- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
+ }
+
+ static void vnt_interrupt_work(struct work_struct *work)
+@@ -1133,6 +1129,8 @@ static void vnt_interrupt_work(struct work_struct *work)
+
+ if (priv->vif)
+ vnt_interrupt_process(priv);
++
++ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
+ }
+
+ static irqreturn_t vnt_interrupt(int irq, void *arg)
+@@ -1142,6 +1140,8 @@ static irqreturn_t vnt_interrupt(int irq, void *arg)
+ if (priv->vif)
+ schedule_work(&priv->interrupt_work);
+
++ MACvIntDisable(priv->PortOffset);
++
+ return IRQ_HANDLED;
+ }
+
+--
+2.16.4
+
diff --git a/patches.drivers/staging-vt6655-Remove-vif-check-from-vnt_interrupt.patch b/patches.drivers/staging-vt6655-Remove-vif-check-from-vnt_interrupt.patch
new file mode 100644
index 0000000000..dec7250735
--- /dev/null
+++ b/patches.drivers/staging-vt6655-Remove-vif-check-from-vnt_interrupt.patch
@@ -0,0 +1,39 @@
+From cc26358f89c3e493b54766b1ca56cfc6b14db78a Mon Sep 17 00:00:00 2001
+From: Malcolm Priestley <tvboxspy@gmail.com>
+Date: Wed, 27 Mar 2019 18:45:26 +0000
+Subject: [PATCH] staging: vt6655: Remove vif check from vnt_interrupt
+Git-commit: cc26358f89c3e493b54766b1ca56cfc6b14db78a
+Patch-mainline: v5.1-rc3
+References: bsc#1051510
+
+A check for vif is made in vnt_interrupt_work.
+
+There is a small chance of leaving interrupt disabled while vif
+is NULL and the work hasn't been scheduled.
+
+Signed-off-by: Malcolm Priestley <tvboxspy@gmail.com>
+Cc: stable@vger.kernel.org # v4.2+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/staging/vt6655/device_main.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
+index 83f1a1cf9182..c6bb4aaf9bd0 100644
+--- a/drivers/staging/vt6655/device_main.c
++++ b/drivers/staging/vt6655/device_main.c
+@@ -1137,8 +1137,7 @@ static irqreturn_t vnt_interrupt(int irq, void *arg)
+ {
+ struct vnt_private *priv = arg;
+
+- if (priv->vif)
+- schedule_work(&priv->interrupt_work);
++ schedule_work(&priv->interrupt_work);
+
+ MACvIntDisable(priv->PortOffset);
+
+--
+2.16.4
+
diff --git a/patches.drivers/vfio-type1-limit-dma-mappings-per-container b/patches.drivers/vfio-type1-limit-dma-mappings-per-container
new file mode 100644
index 0000000000..5f4d67ede6
--- /dev/null
+++ b/patches.drivers/vfio-type1-limit-dma-mappings-per-container
@@ -0,0 +1,94 @@
+From: Alex Williamson <alex.williamson@redhat.com>
+Date: Wed, 3 Apr 2019 12:36:21 -0600
+Subject: vfio/type1: Limit DMA mappings per container
+Git-commit: 492855939bdb59c6f947b0b5b44af9ad82b7e38c
+Patch-mainline: v5.1-rc4
+References: CVE-2019-3882 bsc#1131427
+
+Memory backed DMA mappings are accounted against a user's locked
+memory limit, including multiple mappings of the same memory. This
+accounting bounds the number of such mappings that a user can create.
+However, DMA mappings that are not backed by memory, such as DMA
+mappings of device MMIO via mmaps, do not make use of page pinning
+and therefore do not count against the user's locked memory limit.
+These mappings still consume memory, but the memory is not well
+associated to the process for the purpose of oom killing a task.
+
+To add bounding on this use case, we introduce a limit to the total
+number of concurrent DMA mappings that a user is allowed to create.
+This limit is exposed as a tunable module option where the default
+value of 64K is expected to be well in excess of any reasonable use
+case (a large virtual machine configuration would typically only make
+use of tens of concurrent mappings).
+
+This fixes CVE-2019-3882.
+
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Tested-by: Eric Auger <eric.auger@redhat.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Acked-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/vfio/vfio_iommu_type1.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 73652e21efec..d0f731c9920a 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
+ MODULE_PARM_DESC(disable_hugepages,
+ "Disable VFIO IOMMU support for IOMMU hugepages.");
+
++static unsigned int dma_entry_limit __read_mostly = U16_MAX;
++module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
++MODULE_PARM_DESC(dma_entry_limit,
++ "Maximum number of user DMA mappings per container (65535).");
++
+ struct vfio_iommu {
+ struct list_head domain_list;
+ struct vfio_domain *external_domain; /* domain for external user */
+ struct mutex lock;
+ struct rb_root dma_list;
+ struct blocking_notifier_head notifier;
++ unsigned int dma_avail;
+ bool v2;
+ bool nesting;
+ };
+@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
+ vfio_unlink_dma(iommu, dma);
+ put_task_struct(dma->task);
+ kfree(dma);
++ iommu->dma_avail++;
+ }
+
+ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
+ goto out_unlock;
+ }
+
++ if (!iommu->dma_avail) {
++ ret = -ENOSPC;
++ goto out_unlock;
++ }
++
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
++ iommu->dma_avail--;
+ dma->iova = iova;
+ dma->vaddr = vaddr;
+ dma->prot = prot;
+@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
+
+ INIT_LIST_HEAD(&iommu->domain_list);
+ iommu->dma_list = RB_ROOT;
++ iommu->dma_avail = dma_entry_limit;
+ mutex_init(&iommu->lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
+
+
diff --git a/patches.fixes/0001-Collect-descriptors-of-all-ULD-and-LLD-hardware-queu.patch b/patches.fixes/0001-Collect-descriptors-of-all-ULD-and-LLD-hardware-queu.patch
new file mode 100644
index 0000000000..13631d9389
--- /dev/null
+++ b/patches.fixes/0001-Collect-descriptors-of-all-ULD-and-LLD-hardware-queu.patch
@@ -0,0 +1,503 @@
+From: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+Subject: cxgb4: collect hardware queue descriptors
+Patch-mainline: v4.20-rc1
+Git-commit: 68ddc82aff0c772364757028c2319f386464d512
+References: bsc#1127371
+
+Collect descriptors of all ULD and LLD hardware queues managed
+by LLD.
+
+Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Denis Kirjanov <dkirjanov@suse.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 42 ++++
+ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 3 +-
+ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 238 ++++++++++++++++++++++
+ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 102 ++++++++++
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 7 +
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 4 +
+ 6 files changed, 395 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+index 3c5057868ab3..594ab2f9a299 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+@@ -304,6 +304,48 @@ struct cudbg_pbt_tables {
+ u32 pbt_data[CUDBG_PBT_DATA_ENTRIES];
+ };
+
++enum cudbg_qdesc_qtype {
++ CUDBG_QTYPE_UNKNOWN = 0,
++ CUDBG_QTYPE_NIC_TXQ,
++ CUDBG_QTYPE_NIC_RXQ,
++ CUDBG_QTYPE_NIC_FLQ,
++ CUDBG_QTYPE_CTRLQ,
++ CUDBG_QTYPE_FWEVTQ,
++ CUDBG_QTYPE_INTRQ,
++ CUDBG_QTYPE_PTP_TXQ,
++ CUDBG_QTYPE_OFLD_TXQ,
++ CUDBG_QTYPE_RDMA_RXQ,
++ CUDBG_QTYPE_RDMA_FLQ,
++ CUDBG_QTYPE_RDMA_CIQ,
++ CUDBG_QTYPE_ISCSI_RXQ,
++ CUDBG_QTYPE_ISCSI_FLQ,
++ CUDBG_QTYPE_ISCSIT_RXQ,
++ CUDBG_QTYPE_ISCSIT_FLQ,
++ CUDBG_QTYPE_CRYPTO_TXQ,
++ CUDBG_QTYPE_CRYPTO_RXQ,
++ CUDBG_QTYPE_CRYPTO_FLQ,
++ CUDBG_QTYPE_TLS_RXQ,
++ CUDBG_QTYPE_TLS_FLQ,
++ CUDBG_QTYPE_MAX,
++};
++
++#define CUDBG_QDESC_REV 1
++
++struct cudbg_qdesc_entry {
++ u32 data_size;
++ u32 qtype;
++ u32 qid;
++ u32 desc_size;
++ u32 num_desc;
++ u8 data[0]; /* Must be last */
++};
++
++struct cudbg_qdesc_info {
++ u32 qdesc_entry_size;
++ u32 num_queues;
++ u8 data[0]; /* Must be last */
++};
++
+ #define IREG_NUM_ELEM 4
+
+ static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+index 215fe6260fd7..dec63c15c0ba 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+@@ -81,7 +81,8 @@ enum cudbg_dbg_entity_type {
+ CUDBG_MBOX_LOG = 66,
+ CUDBG_HMA_INDIRECT = 67,
+ CUDBG_HMA = 68,
+- CUDBG_MAX_ENTITY = 70,
++ CUDBG_QDESC = 70,
++ CUDBG_MAX_ENTITY = 71,
+ };
+
+ struct cudbg_init {
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+index 9ae8d48bc4d7..9b63db5ae626 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+@@ -19,6 +19,7 @@
+
+ #include "t4_regs.h"
+ #include "cxgb4.h"
++#include "cxgb4_cudbg.h"
+ #include "cudbg_if.h"
+ #include "cudbg_lib_common.h"
+ #include "cudbg_entity.h"
+@@ -2797,3 +2798,240 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
+ }
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+ }
++
++void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
++ u32 *num, u32 *size)
++{
++ u32 tot_entries = 0, tot_size = 0;
++
++ /* NIC TXQ, RXQ, FLQ, and CTRLQ */
++ tot_entries += MAX_ETH_QSETS * 3;
++ tot_entries += MAX_CTRL_QUEUES;
++
++ tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
++ tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
++ tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE;
++ tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES *
++ MAX_CTRL_TXQ_DESC_SIZE;
++
++ /* FW_EVTQ and INTRQ */
++ tot_entries += INGQ_EXTRAS;
++ tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
++
++ /* PTP_TXQ */
++ tot_entries += 1;
++ tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
++
++ /* ULD TXQ, RXQ, and FLQ */
++ tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS;
++ tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2;
++
++ tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES *
++ MAX_TXQ_DESC_SIZE;
++ tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES *
++ MAX_RXQ_DESC_SIZE;
++ tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS *
++ MAX_FL_DESC_SIZE;
++
++ /* ULD CIQ */
++ tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS;
++ tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
++ MAX_RXQ_DESC_SIZE;
++
++ tot_size += sizeof(struct cudbg_ver_hdr) +
++ sizeof(struct cudbg_qdesc_info) +
++ sizeof(struct cudbg_qdesc_entry) * tot_entries;
++
++ if (num)
++ *num = tot_entries;
++
++ if (size)
++ *size = tot_size;
++}
++
++int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
++ struct cudbg_buffer *dbg_buff,
++ struct cudbg_error *cudbg_err)
++{
++ u32 num_queues = 0, tot_entries = 0, size = 0;
++ struct adapter *padap = pdbg_init->adap;
++ struct cudbg_buffer temp_buff = { 0 };
++ struct cudbg_qdesc_entry *qdesc_entry;
++ struct cudbg_qdesc_info *qdesc_info;
++ struct cudbg_ver_hdr *ver_hdr;
++ struct sge *s = &padap->sge;
++ u32 i, j, cur_off, tot_len;
++ u8 *data;
++ int rc;
++
++ cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
++ size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE);
++ tot_len = size;
++ data = kvzalloc(size, GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ ver_hdr = (struct cudbg_ver_hdr *)data;
++ ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
++ ver_hdr->revision = CUDBG_QDESC_REV;
++ ver_hdr->size = sizeof(struct cudbg_qdesc_info);
++ size -= sizeof(*ver_hdr);
++
++ qdesc_info = (struct cudbg_qdesc_info *)(data +
++ sizeof(*ver_hdr));
++ size -= sizeof(*qdesc_info);
++ qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data;
++
++#define QDESC_GET(q, desc, type, label) do { \
++ if (size <= 0) { \
++ goto label; \
++ } \
++ if (desc) { \
++ cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
++ size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
++ num_queues++; \
++ qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
++ } \
++} while (0)
++
++#define QDESC_GET_TXQ(q, type, label) do { \
++ struct sge_txq *txq = (struct sge_txq *)q; \
++ QDESC_GET(txq, txq->desc, type, label); \
++} while (0)
++
++#define QDESC_GET_RXQ(q, type, label) do { \
++ struct sge_rspq *rxq = (struct sge_rspq *)q; \
++ QDESC_GET(rxq, rxq->desc, type, label); \
++} while (0)
++
++#define QDESC_GET_FLQ(q, type, label) do { \
++ struct sge_fl *flq = (struct sge_fl *)q; \
++ QDESC_GET(flq, flq->desc, type, label); \
++} while (0)
++
++ /* NIC TXQ */
++ for (i = 0; i < s->ethqsets; i++)
++ QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);
++
++ /* NIC RXQ */
++ for (i = 0; i < s->ethqsets; i++)
++ QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out);
++
++ /* NIC FLQ */
++ for (i = 0; i < s->ethqsets; i++)
++ QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);
++
++ /* NIC CTRLQ */
++ for (i = 0; i < padap->params.nports; i++)
++ QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);
++
++ /* FW_EVTQ */
++ QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out);
++
++ /* INTRQ */
++ QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out);
++
++ /* PTP_TXQ */
++ QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);
++
++ /* ULD Queues */
++ mutex_lock(&uld_mutex);
++
++ if (s->uld_txq_info) {
++ struct sge_uld_txq_info *utxq;
++
++ /* ULD TXQ */
++ for (j = 0; j < CXGB4_TX_MAX; j++) {
++ if (!s->uld_txq_info[j])
++ continue;
++
++ utxq = s->uld_txq_info[j];
++ for (i = 0; i < utxq->ntxq; i++)
++ QDESC_GET_TXQ(&utxq->uldtxq[i].q,
++ cudbg_uld_txq_to_qtype(j),
++ out_unlock);
++ }
++ }
++
++ if (s->uld_rxq_info) {
++ struct sge_uld_rxq_info *urxq;
++ u32 base;
++
++ /* ULD RXQ */
++ for (j = 0; j < CXGB4_ULD_MAX; j++) {
++ if (!s->uld_rxq_info[j])
++ continue;
++
++ urxq = s->uld_rxq_info[j];
++ for (i = 0; i < urxq->nrxq; i++)
++ QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
++ cudbg_uld_rxq_to_qtype(j),
++ out_unlock);
++ }
++
++ /* ULD FLQ */
++ for (j = 0; j < CXGB4_ULD_MAX; j++) {
++ if (!s->uld_rxq_info[j])
++ continue;
++
++ urxq = s->uld_rxq_info[j];
++ for (i = 0; i < urxq->nrxq; i++)
++ QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
++ cudbg_uld_flq_to_qtype(j),
++ out_unlock);
++ }
++
++ /* ULD CIQ */
++ for (j = 0; j < CXGB4_ULD_MAX; j++) {
++ if (!s->uld_rxq_info[j])
++ continue;
++
++ urxq = s->uld_rxq_info[j];
++ base = urxq->nrxq;
++ for (i = 0; i < urxq->nciq; i++)
++ QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
++ cudbg_uld_ciq_to_qtype(j),
++ out_unlock);
++ }
++ }
++
++out_unlock:
++ mutex_unlock(&uld_mutex);
++
++out:
++ qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
++ qdesc_info->num_queues = num_queues;
++ cur_off = 0;
++ while (tot_len) {
++ u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE);
++
++ rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
++ &temp_buff);
++ if (rc) {
++ cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
++ goto out_free;
++ }
++
++ memcpy(temp_buff.data, data + cur_off, chunk_size);
++ tot_len -= chunk_size;
++ cur_off += chunk_size;
++ rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
++ dbg_buff);
++ if (rc) {
++ cudbg_put_buff(pdbg_init, &temp_buff);
++ cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
++ goto out_free;
++ }
++ }
++
++out_free:
++ if (data)
++ kvfree(data);
++
++#undef QDESC_GET_FLQ
++#undef QDESC_GET_RXQ
++#undef QDESC_GET_TXQ
++#undef QDESC_GET
++
++ return rc;
++}
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
+index eebefe7cd18e..ecbe203bb57e 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
+@@ -171,6 +171,9 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
+ int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
++int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
++ struct cudbg_buffer *dbg_buff,
++ struct cudbg_error *cudbg_err);
+
+ struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
+ void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
+@@ -182,4 +185,103 @@ int cudbg_fill_meminfo(struct adapter *padap,
+ struct cudbg_meminfo *meminfo_buff);
+ void cudbg_fill_le_tcam_info(struct adapter *padap,
+ struct cudbg_tcam *tcam_region);
++void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
++ u32 *num, u32 *size);
++
++static inline u32 cudbg_uld_txq_to_qtype(u32 uld)
++{
++ switch (uld) {
++ case CXGB4_TX_OFLD:
++ return CUDBG_QTYPE_OFLD_TXQ;
++ case CXGB4_TX_CRYPTO:
++ return CUDBG_QTYPE_CRYPTO_TXQ;
++ }
++
++ return CUDBG_QTYPE_UNKNOWN;
++}
++
++static inline u32 cudbg_uld_rxq_to_qtype(u32 uld)
++{
++ switch (uld) {
++ case CXGB4_ULD_RDMA:
++ return CUDBG_QTYPE_RDMA_RXQ;
++ case CXGB4_ULD_ISCSI:
++ return CUDBG_QTYPE_ISCSI_RXQ;
++ case CXGB4_ULD_ISCSIT:
++ return CUDBG_QTYPE_ISCSIT_RXQ;
++ case CXGB4_ULD_CRYPTO:
++ return CUDBG_QTYPE_CRYPTO_RXQ;
++ }
++
++ return CUDBG_QTYPE_UNKNOWN;
++}
++
++static inline u32 cudbg_uld_flq_to_qtype(u32 uld)
++{
++ switch (uld) {
++ case CXGB4_ULD_RDMA:
++ return CUDBG_QTYPE_RDMA_FLQ;
++ case CXGB4_ULD_ISCSI:
++ return CUDBG_QTYPE_ISCSI_FLQ;
++ case CXGB4_ULD_ISCSIT:
++ return CUDBG_QTYPE_ISCSIT_FLQ;
++ case CXGB4_ULD_CRYPTO:
++ return CUDBG_QTYPE_CRYPTO_FLQ;
++ }
++
++ return CUDBG_QTYPE_UNKNOWN;
++}
++
++static inline u32 cudbg_uld_ciq_to_qtype(u32 uld)
++{
++ switch (uld) {
++ case CXGB4_ULD_RDMA:
++ return CUDBG_QTYPE_RDMA_CIQ;
++ }
++
++ return CUDBG_QTYPE_UNKNOWN;
++}
++
++static inline void cudbg_fill_qdesc_txq(const struct sge_txq *txq,
++ enum cudbg_qdesc_qtype type,
++ struct cudbg_qdesc_entry *entry)
++{
++ entry->qtype = type;
++ entry->qid = txq->cntxt_id;
++ entry->desc_size = sizeof(struct tx_desc);
++ entry->num_desc = txq->size;
++ entry->data_size = txq->size * sizeof(struct tx_desc);
++ memcpy(entry->data, txq->desc, entry->data_size);
++}
++
++static inline void cudbg_fill_qdesc_rxq(const struct sge_rspq *rxq,
++ enum cudbg_qdesc_qtype type,
++ struct cudbg_qdesc_entry *entry)
++{
++ entry->qtype = type;
++ entry->qid = rxq->cntxt_id;
++ entry->desc_size = rxq->iqe_len;
++ entry->num_desc = rxq->size;
++ entry->data_size = rxq->size * rxq->iqe_len;
++ memcpy(entry->data, rxq->desc, entry->data_size);
++}
++
++static inline void cudbg_fill_qdesc_flq(const struct sge_fl *flq,
++ enum cudbg_qdesc_qtype type,
++ struct cudbg_qdesc_entry *entry)
++{
++ entry->qtype = type;
++ entry->qid = flq->cntxt_id;
++ entry->desc_size = sizeof(__be64);
++ entry->num_desc = flq->size;
++ entry->data_size = flq->size * sizeof(__be64);
++ memcpy(entry->data, flq->desc, entry->data_size);
++}
++
++static inline
++struct cudbg_qdesc_entry *cudbg_next_qdesc(struct cudbg_qdesc_entry *e)
++{
++ return (struct cudbg_qdesc_entry *)
++ ((u8 *)e + sizeof(*e) + e->data_size);
++}
+ #endif /* __CUDBG_LIB_H__ */
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index e9eb4003b813..525bdd4b4564 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -526,6 +526,13 @@ enum {
+ };
+
+ enum {
++ MAX_TXQ_DESC_SIZE = 64,
++ MAX_RXQ_DESC_SIZE = 128,
++ MAX_FL_DESC_SIZE = 8,
++ MAX_CTRL_TXQ_DESC_SIZE = 64,
++};
++
++enum {
+ INGQ_EXTRAS = 2, /* firmware event queue and */
+ /* forwarded interrupts */
+ MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+index 8d751efcb90e..540cefe33400 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+@@ -30,6 +30,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
+
+ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
+ { CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
++ { CUDBG_QDESC, cudbg_collect_qdesc },
+ { CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
+ { CUDBG_REG_DUMP, cudbg_collect_reg_dump },
+ { CUDBG_CIM_LA, cudbg_collect_cim_la },
+@@ -309,6 +310,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
++ case CUDBG_QDESC:
++ cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
++ break;
+ default:
+ break;
+ }
+--
+2.12.3
+
diff --git a/patches.fixes/0001-cxgb4-Add-capability-to-get-set-SGE-Doorbell-Queue-T.patch b/patches.fixes/0001-cxgb4-Add-capability-to-get-set-SGE-Doorbell-Queue-T.patch
new file mode 100644
index 0000000000..bed21b6b92
--- /dev/null
+++ b/patches.fixes/0001-cxgb4-Add-capability-to-get-set-SGE-Doorbell-Queue-T.patch
@@ -0,0 +1,268 @@
+From: Vishal Kulkarni <vishal@chelsio.com>
+Subject: cxgb4: Add capability to get/set SGE Doorbell Queue Timer
+ Tick
+Patch-mainline: v5.1-rc1
+Git-commit: 543a1b85e734d2ec62e5df61641e8ea7349de4a8
+References: bsc#1127371
+
+This patch gets/sets SGE Doorbell Queue timer ticks via ethtool
+
+Original work by: Casey Leedom <leedom@chelsio.com>
+
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Denis Kirjanov <dkirjanov@suse.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 +
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 189 ++++++++++++++++++++-
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 14 +-
+ 3 files changed, 198 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index 2e68b6d0376b..240f5c7ad170 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -810,6 +810,7 @@ struct sge {
+ u32 fl_align; /* response queue message alignment */
+ u32 fl_starve_thres; /* Free List starvation threshold */
+
++ u16 dbqtimer_tick;
+ struct sge_idma_monitor_state idma_monitor;
+ unsigned int egr_start;
+ unsigned int egr_sz;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+index 20083b4f72c4..16f018d85eae 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -1044,11 +1044,190 @@ static int get_adaptive_rx_setting(struct net_device *dev)
+ return q->rspq.adaptive_rx;
+ }
+
+-static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
++/* Return the current global Adapter SGE Doorbell Queue Timer Tick for all
++ * Ethernet TX Queues.
++ */
++static int get_dbqtimer_tick(struct net_device *dev)
++{
++ struct port_info *pi = netdev_priv(dev);
++ struct adapter *adap = pi->adapter;
++
++ if (!(adap->flags & SGE_DBQ_TIMER))
++ return 0;
++
++ return adap->sge.dbqtimer_tick;
++}
++
++/* Return the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
++ * associated with a Network Device.
++ */
++static int get_dbqtimer(struct net_device *dev)
++{
++ struct port_info *pi = netdev_priv(dev);
++ struct adapter *adap = pi->adapter;
++ struct sge_eth_txq *txq;
++
++ txq = &adap->sge.ethtxq[pi->first_qset];
++
++ if (!(adap->flags & SGE_DBQ_TIMER))
++ return 0;
++
++ /* all of the TX Queues use the same Timer Index */
++ return adap->sge.dbqtimer_val[txq->dbqtimerix];
++}
++
++/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
++ * Queues. This is the fundamental "Tick" that sets the scale of values which
++ * can be used. Individual Ethernet TX Queues index into a relatively small
++ * array of Tick Multipliers. Changing the base Tick will thus change all of
++ * the resulting Timer Values associated with those multipliers for all
++ * Ethernet TX Queues.
++ */
++static int set_dbqtimer_tick(struct net_device *dev, int usecs)
++{
++ struct port_info *pi = netdev_priv(dev);
++ struct adapter *adap = pi->adapter;
++ struct sge *s = &adap->sge;
++ u32 param, val;
++ int ret;
++
++ if (!(adap->flags & SGE_DBQ_TIMER))
++ return 0;
++
++ /* return early if it's the same Timer Tick we're already using */
++ if (s->dbqtimer_tick == usecs)
++ return 0;
++
++ /* attempt to set the new Timer Tick value */
++ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
++ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
++ val = usecs;
++ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
++ if (ret)
++ return ret;
++ s->dbqtimer_tick = usecs;
++
++ /* if successful, reread resulting dependent Timer values */
++ ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val),
++ s->dbqtimer_val);
++ return ret;
++}
++
++/* Set the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
++ * associated with a Network Device. There is a relatively small array of
++ * possible Timer Values so we need to pick the closest value available.
++ */
++static int set_dbqtimer(struct net_device *dev, int usecs)
++{
++ int qix, timerix, min_timerix, delta, min_delta;
++ struct port_info *pi = netdev_priv(dev);
++ struct adapter *adap = pi->adapter;
++ struct sge *s = &adap->sge;
++ struct sge_eth_txq *txq;
++ u32 param, val;
++ int ret;
++
++ if (!(adap->flags & SGE_DBQ_TIMER))
++ return 0;
++
++ /* Find the SGE Doorbell Timer Value that's closest to the requested
++ * value.
++ */
++ min_delta = INT_MAX;
++ min_timerix = 0;
++ for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) {
++ delta = s->dbqtimer_val[timerix] - usecs;
++ if (delta < 0)
++ delta = -delta;
++ if (delta < min_delta) {
++ min_delta = delta;
++ min_timerix = timerix;
++ }
++ }
++
++ /* Return early if it's the same Timer Index we're already using.
++ * We use the same Timer Index for all of the TX Queues for an
++ * interface so it's only necessary to check the first one.
++ */
++ txq = &s->ethtxq[pi->first_qset];
++ if (txq->dbqtimerix == min_timerix)
++ return 0;
++
++ for (qix = 0; qix < pi->nqsets; qix++, txq++) {
++ if (adap->flags & FULL_INIT_DONE) {
++ param =
++ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
++ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
++ FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
++ val = min_timerix;
++ ret = t4_set_params(adap, adap->mbox, adap->pf, 0,
++ 1, &param, &val);
++ if (ret)
++ return ret;
++ }
++ txq->dbqtimerix = min_timerix;
++ }
++ return 0;
++}
++
++/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
++ * Queues and the Timer Value for the Ethernet TX Queues associated with a
++ * Network Device. Since changing the global Tick changes all of the
++ * available Timer Values, we need to do this first before selecting the
++ * resulting closest Timer Value. Moreover, since the Tick is global,
++ * changing it affects the Timer Values for all Network Devices on the
++ * adapter. So, before changing the Tick, we grab all of the current Timer
++ * Values for other Network Devices on this Adapter and then attempt to select
++ * new Timer Values which are close to the old values ...
++ */
++static int set_dbqtimer_tickval(struct net_device *dev,
++ int tick_usecs, int timer_usecs)
++{
++ struct port_info *pi = netdev_priv(dev);
++ struct adapter *adap = pi->adapter;
++ int timer[MAX_NPORTS];
++ unsigned int port;
++ int ret;
++
++ /* Grab the other adapter Network Interface current timers and fill in
++ * the new one for this Network Interface.
++ */
++ for_each_port(adap, port)
++ if (port == pi->port_id)
++ timer[port] = timer_usecs;
++ else
++ timer[port] = get_dbqtimer(adap->port[port]);
++
++ /* Change the global Tick first ... */
++ ret = set_dbqtimer_tick(dev, tick_usecs);
++ if (ret)
++ return ret;
++
++ /* ... and then set all of the Network Interface Timer Values ... */
++ for_each_port(adap, port) {
++ ret = set_dbqtimer(adap->port[port], timer[port]);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++static int set_coalesce(struct net_device *dev,
++ struct ethtool_coalesce *coalesce)
+ {
+- set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
+- return set_rx_intr_params(dev, c->rx_coalesce_usecs,
+- c->rx_max_coalesced_frames);
++ int ret;
++
++ set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce);
++
++ ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs,
++ coalesce->rx_max_coalesced_frames);
++ if (ret)
++ return ret;
++
++ return set_dbqtimer_tickval(dev,
++ coalesce->tx_coalesce_usecs_irq,
++ coalesce->tx_coalesce_usecs);
+ }
+
+ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+@@ -1061,6 +1240,8 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+ c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
+ adap->sge.counter_val[rq->pktcnt_idx] : 0;
+ c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
++ c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev);
++ c->tx_coalesce_usecs = get_dbqtimer(dev);
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 942bff1380c8..57fddf6c8881 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -4257,8 +4257,18 @@ static int adap_init0(struct adapter *adap)
+ /* Grab the SGE Doorbell Queue Timer values. If successful, that
+ * indicates that the Firmware and Hardware support this.
+ */
+- ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(adap->sge.dbqtimer_val),
+- adap->sge.dbqtimer_val);
++ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
++ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
++ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
++ 1, params, val);
++
++ if (!ret) {
++ adap->sge.dbqtimer_tick = val[0];
++ ret = t4_read_sge_dbqtimers(adap,
++ ARRAY_SIZE(adap->sge.dbqtimer_val),
++ adap->sge.dbqtimer_val);
++ }
++
+ if (!ret)
+ adap->flags |= SGE_DBQ_TIMER;
+
+--
+2.12.3
+
diff --git a/patches.fixes/0001-cxgb4-Add-new-T5-PCI-device-id-0x50ae.patch b/patches.fixes/0001-cxgb4-Add-new-T5-PCI-device-id-0x50ae.patch
new file mode 100644
index 0000000000..c8b0e7fc5b
--- /dev/null
+++ b/patches.fixes/0001-cxgb4-Add-new-T5-PCI-device-id-0x50ae.patch
@@ -0,0 +1,28 @@
+From: Ganesh Goudar <ganeshgr@chelsio.com>
+Subject: cxgb4: Add new T5 PCI device id 0x50ae
+Patch-mainline: v4.19-rc1
+Git-commit: 964fc35c0910c7970120f893fa866e6b3468dcf0
+References: bsc#1127371
+
+Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Denis Kirjanov <dkirjanov@suse.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+index c7f8d0441278..e3adf435913e 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+@@ -188,6 +188,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
++ CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */
+
+ /* T6 adapters:
+ */
+--
+2.12.3
+
diff --git a/patches.fixes/0001-cxgb4-Add-support-for-FW_ETH_TX_PKT_VM_WR.patch b/patches.fixes/0001-cxgb4-Add-support-for-FW_ETH_TX_PKT_VM_WR.patch
new file mode 100644
index 0000000000..06603a0a43
--- /dev/null
+++ b/patches.fixes/0001-cxgb4-Add-support-for-FW_ETH_TX_PKT_VM_WR.patch
@@ -0,0 +1,483 @@
+From: Arjun Vynipadath <arjun@chelsio.com>
+Patch-mainline: v4.19-rc1
+Git-commit: d5fbda61ac923e0adb89fd59fdf4a1d99406b86e
+Subject: cxgb4: Add support for FW_ETH_TX_PKT_VM_WR
+References: bsc#1127371
+
+The present TX workrequest(FW_ETH_TX_PKT_WR) cant be used for
+host->vf communication, since it doesn't loopback the outgoing
+packets to virtual interfaces on the same port. This can be done
+using FW_ETH_TX_PKT_VM_WR.
+This fix depends on ethtool_flags to determine what WR to use for
+TX path. Support for setting this flags by user is added in next
+commit.
+
+Based on the original work by : Casey Leedom <leedom@chelsio.com>
+
+Signed-off-by: Casey Leedom <leedom@chelsio.com>
+Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
+Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Denis Kirjanov <dkirjanov@suse.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 13 +-
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +-
+ drivers/net/ethernet/chelsio/cxgb4/sge.c | 372 +++++++++++++++++++++++-
+ 3 files changed, 383 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index 56244acf7ce6..ff093ffe4a67 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -520,6 +520,15 @@ enum {
+ MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
+ };
+
++enum {
++ PRIV_FLAG_PORT_TX_VM_BIT,
++};
++
++#define PRIV_FLAG_PORT_TX_VM BIT(PRIV_FLAG_PORT_TX_VM_BIT)
++
++#define PRIV_FLAGS_ADAP 0
++#define PRIV_FLAGS_PORT PRIV_FLAG_PORT_TX_VM
++
+ struct adapter;
+ struct sge_rspq;
+
+@@ -556,6 +565,7 @@ struct port_info {
+ struct hwtstamp_config tstamp_config;
+ bool ptp_enable;
+ struct sched_table *sched_tbl;
++ u32 eth_flags;
+ };
+
+ struct dentry;
+@@ -870,6 +880,7 @@ struct adapter {
+ unsigned int flags;
+ unsigned int adap_idx;
+ enum chip_type chip;
++ u32 eth_flags;
+
+ int msg_enable;
+ __be16 vxlan_port;
+@@ -1329,7 +1340,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
+ void t4_free_sge_resources(struct adapter *adap);
+ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
+ irq_handler_t t4_intr_handler(struct adapter *adap);
+-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
++netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
+ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl);
+ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 2ebc9b281616..23d5e3424a08 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -3190,7 +3190,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
+ static const struct net_device_ops cxgb4_netdev_ops = {
+ .ndo_open = cxgb_open,
+ .ndo_stop = cxgb_close,
+- .ndo_start_xmit = t4_eth_xmit,
++ .ndo_start_xmit = t4_start_xmit,
+ .ndo_select_queue = cxgb_select_queue,
+ .ndo_get_stats64 = cxgb_get_stats,
+ .ndo_set_rx_mode = cxgb_set_rxmode,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index a8025e5f8842..eb4322488190 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -1295,13 +1295,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
+ }
+
+ /**
+- * t4_eth_xmit - add a packet to an Ethernet Tx queue
++ * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
+ */
+-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ u32 wr_mid, ctrl0, op;
+ u64 cntrl, *end, *sgl;
+@@ -1549,6 +1549,374 @@ out_free: dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
++/* Constants ... */
++enum {
++ /* Egress Queue sizes, producer and consumer indices are all in units
++ * of Egress Context Units bytes. Note that as far as the hardware is
++ * concerned, the free list is an Egress Queue (the host produces free
++ * buffers which the hardware consumes) and free list entries are
++ * 64-bit PCI DMA addresses.
++ */
++ EQ_UNIT = SGE_EQ_IDXSIZE,
++ FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
++ TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
++
++ T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
++ sizeof(struct cpl_tx_pkt_lso_core) +
++ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
++};
++
++/**
++ * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
++ * @skb: the packet
++ *
++ * Returns whether an Ethernet packet is small enough to fit completely as
++ * immediate data.
++ */
++static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
++{
++ /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
++ * which does not accommodate immediate data. We could dike out all
++ * of the support code for immediate data but that would tie our hands
++ * too much if we ever want to enhace the firmware. It would also
++ * create more differences between the PF and VF Drivers.
++ */
++ return false;
++}
++
++/**
++ * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
++ * @skb: the packet
++ *
++ * Returns the number of flits needed for a TX Work Request for the
++ * given Ethernet packet, including the needed WR and CPL headers.
++ */
++static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
++{
++ unsigned int flits;
++
++ /* If the skb is small enough, we can pump it out as a work request
++ * with only immediate data. In that case we just have to have the
++ * TX Packet header plus the skb data in the Work Request.
++ */
++ if (t4vf_is_eth_imm(skb))
++ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
++ sizeof(__be64));
++
++ /* Otherwise, we're going to have to construct a Scatter gather list
++ * of the skb body and fragments. We also include the flits necessary
++ * for the TX Packet Work Request and CPL. We always have a firmware
++ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
++ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
++ * message or, if we're doing a Large Send Offload, an LSO CPL message
++ * with an embedded TX Packet Write CPL message.
++ */
++ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
++ if (skb_shinfo(skb)->gso_size)
++ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
++ sizeof(struct cpl_tx_pkt_lso_core) +
++ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
++ else
++ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
++ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
++ return flits;
++}
++
++/**
++ * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
++ * @skb: the packet
++ * @dev: the egress net device
++ *
++ * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
++ */
++static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ dma_addr_t addr[MAX_SKB_FRAGS + 1];
++ const struct skb_shared_info *ssi;
++ struct fw_eth_tx_pkt_vm_wr *wr;
++ int qidx, credits, max_pkt_len;
++ struct cpl_tx_pkt_core *cpl;
++ const struct port_info *pi;
++ unsigned int flits, ndesc;
++ struct sge_eth_txq *txq;
++ struct adapter *adapter;
++ u64 cntrl, *end;
++ u32 wr_mid;
++ const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
++ sizeof(wr->ethmacsrc) +
++ sizeof(wr->ethtype) +
++ sizeof(wr->vlantci);
++
++ /* The chip minimum packet length is 10 octets but the firmware
++ * command that we are using requires that we copy the Ethernet header
++ * (including the VLAN tag) into the header so we reject anything
++ * smaller than that ...
++ */
++ if (unlikely(skb->len < fw_hdr_copy_len))
++ goto out_free;
++
++ /* Discard the packet if the length is greater than mtu */
++ max_pkt_len = ETH_HLEN + dev->mtu;
++ if (skb_vlan_tag_present(skb))
++ max_pkt_len += VLAN_HLEN;
++ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
++ goto out_free;
++
++ /* Figure out which TX Queue we're going to use. */
++ pi = netdev_priv(dev);
++ adapter = pi->adapter;
++ qidx = skb_get_queue_mapping(skb);
++ WARN_ON(qidx >= pi->nqsets);
++ txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
++
++ /* Take this opportunity to reclaim any TX Descriptors whose DMA
++ * transfers have completed.
++ */
++ reclaim_completed_tx(adapter, &txq->q, true);
++
++ /* Calculate the number of flits and TX Descriptors we're going to
++ * need along with how many TX Descriptors will be left over after
++ * we inject our Work Request.
++ */
++ flits = t4vf_calc_tx_flits(skb);
++ ndesc = flits_to_desc(flits);
++ credits = txq_avail(&txq->q) - ndesc;
++
++ if (unlikely(credits < 0)) {
++ /* Not enough room for this packet's Work Request. Stop the
++ * TX Queue and return a "busy" condition. The queue will get
++ * started later on when the firmware informs us that space
++ * has opened up.
++ */
++ eth_txq_stop(txq);
++ dev_err(adapter->pdev_dev,
++ "%s: TX ring %u full while queue awake!\n",
++ dev->name, qidx);
++ return NETDEV_TX_BUSY;
++ }
++
++ if (!t4vf_is_eth_imm(skb) &&
++ unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
++ /* We need to map the skb into PCI DMA space (because it can't
++ * be in-lined directly into the Work Request) and the mapping
++ * operation failed. Record the error and drop the packet.
++ */
++ txq->mapping_err++;
++ goto out_free;
++ }
++
++ wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
++ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
++ /* After we're done injecting the Work Request for this
++ * packet, we'll be below our "stop threshold" so stop the TX
++ * Queue now and schedule a request for an SGE Egress Queue
++ * Update message. The queue will get started later on when
++ * the firmware processes this Work Request and sends us an
++ * Egress Queue Status Update message indicating that space
++ * has opened up.
++ */
++ eth_txq_stop(txq);
++ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
++ }
++
++ /* Start filling in our Work Request. Note that we do _not_ handle
++ * the WR Header wrapping around the TX Descriptor Ring. If our
++ * maximum header size ever exceeds one TX Descriptor, we'll need to
++ * do something else here.
++ */
++ WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
++ wr = (void *)&txq->q.desc[txq->q.pidx];
++ wr->equiq_to_len16 = cpu_to_be32(wr_mid);
++ wr->r3[0] = cpu_to_be32(0);
++ wr->r3[1] = cpu_to_be32(0);
++ skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
++ end = (u64 *)wr + flits;
++
++ /* If this is a Large Send Offload packet we'll put in an LSO CPL
++ * message with an encapsulated TX Packet CPL message. Otherwise we
++ * just use a TX Packet CPL message.
++ */
++ ssi = skb_shinfo(skb);
++ if (ssi->gso_size) {
++ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
++ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
++ int l3hdr_len = skb_network_header_len(skb);
++ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
++
++ wr->op_immdlen =
++ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
++ FW_WR_IMMDLEN_V(sizeof(*lso) +
++ sizeof(*cpl)));
++ /* Fill in the LSO CPL message. */
++ lso->lso_ctrl =
++ cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
++ LSO_FIRST_SLICE_F |
++ LSO_LAST_SLICE_F |
++ LSO_IPV6_V(v6) |
++ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
++ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
++ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
++ lso->ipid_ofst = cpu_to_be16(0);
++ lso->mss = cpu_to_be16(ssi->gso_size);
++ lso->seqno_offset = cpu_to_be32(0);
++ if (is_t4(adapter->params.chip))
++ lso->len = cpu_to_be32(skb->len);
++ else
++ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
++
++ /* Set up TX Packet CPL pointer, control word and perform
++ * accounting.
++ */
++ cpl = (void *)(lso + 1);
++
++ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
++ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
++ else
++ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
++
++ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
++ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
++ TXPKT_IPHDR_LEN_V(l3hdr_len);
++ txq->tso++;
++ txq->tx_cso += ssi->gso_segs;
++ } else {
++ int len;
++
++ len = (t4vf_is_eth_imm(skb)
++ ? skb->len + sizeof(*cpl)
++ : sizeof(*cpl));
++ wr->op_immdlen =
++ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
++ FW_WR_IMMDLEN_V(len));
++
++ /* Set up TX Packet CPL pointer, control word and perform
++ * accounting.
++ */
++ cpl = (void *)(wr + 1);
++ if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ cntrl = hwcsum(adapter->params.chip, skb) |
++ TXPKT_IPCSUM_DIS_F;
++ txq->tx_cso++;
++ } else {
++ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
++ }
++ }
++
++ /* If there's a VLAN tag present, add that to the list of things to
++ * do in this Work Request.
++ */
++ if (skb_vlan_tag_present(skb)) {
++ txq->vlan_ins++;
++ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
++ }
++
++ /* Fill in the TX Packet CPL message header. */
++ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
++ TXPKT_INTF_V(pi->port_id) |
++ TXPKT_PF_V(0));
++ cpl->pack = cpu_to_be16(0);
++ cpl->len = cpu_to_be16(skb->len);
++ cpl->ctrl1 = cpu_to_be64(cntrl);
++
++ /* Fill in the body of the TX Packet CPL message with either in-lined
++ * data or a Scatter/Gather List.
++ */
++ if (t4vf_is_eth_imm(skb)) {
++ /* In-line the packet's data and free the skb since we don't
++ * need it any longer.
++ */
++ inline_tx_skb(skb, &txq->q, cpl + 1);
++ dev_consume_skb_any(skb);
++ } else {
++ /* Write the skb's Scatter/Gather list into the TX Packet CPL
++ * message and retain a pointer to the skb so we can free it
++ * later when its DMA completes. (We store the skb pointer
++ * in the Software Descriptor corresponding to the last TX
++ * Descriptor used by the Work Request.)
++ *
++ * The retained skb will be freed when the corresponding TX
++ * Descriptors are reclaimed after their DMAs complete.
++ * However, this could take quite a while since, in general,
++ * the hardware is set up to be lazy about sending DMA
++ * completion notifications to us and we mostly perform TX
++ * reclaims in the transmit routine.
++ *
++ * This is good for performamce but means that we rely on new
++ * TX packets arriving to run the destructors of completed
++ * packets, which open up space in their sockets' send queues.
++ * Sometimes we do not get such new packets causing TX to
++ * stall. A single UDP transmitter is a good example of this
++ * situation. We have a clean up timer that periodically
++ * reclaims completed packets but it doesn't run often enough
++ * (nor do we want it to) to prevent lengthy stalls. A
++ * solution to this problem is to run the destructor early,
++ * after the packet is queued but before it's DMAd. A con is
++ * that we lie to socket memory accounting, but the amount of
++ * extra memory is reasonable (limited by the number of TX
++ * descriptors), the packets do actually get freed quickly by
++ * new packets almost always, and for protocols like TCP that
++ * wait for acks to really free up the data the extra memory
++ * is even less. On the positive side we run the destructors
++ * on the sending CPU rather than on a potentially different
++ * completing CPU, usually a good thing.
++ *
++ * Run the destructor before telling the DMA engine about the
++ * packet to make sure it doesn't complete and get freed
++ * prematurely.
++ */
++ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
++ struct sge_txq *tq = &txq->q;
++ int last_desc;
++
++ /* If the Work Request header was an exact multiple of our TX
++ * Descriptor length, then it's possible that the starting SGL
++ * pointer lines up exactly with the end of our TX Descriptor
++ * ring. If that's the case, wrap around to the beginning
++ * here ...
++ */
++ if (unlikely((void *)sgl == (void *)tq->stat)) {
++ sgl = (void *)tq->desc;
++ end = (void *)((void *)tq->desc +
++ ((void *)end - (void *)tq->stat));
++ }
++
++ write_sgl(skb, tq, sgl, end, 0, addr);
++ skb_orphan(skb);
++
++ last_desc = tq->pidx + ndesc - 1;
++ if (last_desc >= tq->size)
++ last_desc -= tq->size;
++ tq->sdesc[last_desc].skb = skb;
++ tq->sdesc[last_desc].sgl = sgl;
++ }
++
++ /* Advance our internal TX Queue state, tell the hardware about
++ * the new TX descriptors and return success.
++ */
++ txq_advance(&txq->q, ndesc);
++
++ ring_tx_db(adapter, &txq->q, ndesc);
++ return NETDEV_TX_OK;
++
++out_free:
++ /* An error of some sort happened. Free the TX skb and tell the
++ * OS that we've "dealt" with the packet ...
++ */
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++}
++
++netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct port_info *pi = netdev_priv(dev);
++
++ if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
++ return cxgb4_vf_eth_xmit(skb, dev);
++
++ return cxgb4_eth_xmit(skb, dev);
++}
++
+ /**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+--
+2.12.3
+
diff --git a/patches.fixes/0001-cxgb4-Added-missing-break-in-ndo_udp_tunnel_-add-del.patch b/patches.fixes/0001-cxgb4-Added-missing-break-in-ndo_udp_tunnel_-add-del.patch
new file mode 100644
index 0000000000..9fb48cc3d3
--- /dev/null
+++ b/patches.fixes/0001-cxgb4-Added-missing-break-in-ndo_udp_tunnel_-add-del.patch
@@ -0,0 +1,42 @@
+From: Arjun Vynipadath <arjun@chelsio.com>
+Subject: cxgb4: Added missing break in ndo_udp_tunnel_{add/del}
+Patch-mainline: v4.18-rc8
+Git-commit: 942a656f1f228f06a37adad0e6c347773cfe7bd6
+References: bsc#1127371
+
+Break statements were missing for Geneve case in
+ndo_udp_tunnel_{add/del}, thereby raw mac matchall
+entries were not getting added.
+
+Fixes: c746fc0e8b2d("cxgb4: add geneve offload support for T6")
+Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
+Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Denis Kirjanov <dkirjanov@suse.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index bc03c175a3cd..a8926e97935e 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -3072,6 +3072,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
+
+ adapter->geneve_port = 0;
+ t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
++ break;
+ default:
+ return;
+ }
+@@ -3157,6 +3158,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
+
+ t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
+ GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
++ break;
+ default:
+ return;
+ }
+--
+2.12.3
+
diff --git a/patches.fixes/0001-cxgb4-Support-ethtool-private-flags.patch b/patches.fixes/0001-cxgb4-Support-ethtool-private-flags.patch
new file mode 100644
index 0000000000..140266745e
--- /dev/null
+++ b/patches.fixes/0001-cxgb4-Support-ethtool-private-flags.patch
@@ -0,0 +1,109 @@
+From: Arjun Vynipadath <arjun@chelsio.com>
+Subject: cxgb4: Support ethtool private flags
+Patch-mainline: v4.19-rc1
+Git-commit: c90d160487c4f82ba128730bcbaf7da760c5bdf1
+References: bsc#1127371
+
+This is used to change TX workrequests, which helps in
+host->vf communication.
+
+Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
+Signed-off-by: Casey Leedom <leedom@chelsio.com>
+Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Denis Kirjanov <dkirjanov@suse.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 42 ++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+index f7eef93ffc87..ddb8b9eba6bf 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -177,6 +177,10 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
+ "bg3_frames_trunc ",
+ };
+
++static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
++ [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
++};
++
+ static int get_sset_count(struct net_device *dev, int sset)
+ {
+ switch (sset) {
+@@ -185,6 +189,8 @@ static int get_sset_count(struct net_device *dev, int sset)
+ ARRAY_SIZE(adapter_stats_strings) +
+ ARRAY_SIZE(channel_stats_strings) +
+ ARRAY_SIZE(loopback_stats_strings);
++ case ETH_SS_PRIV_FLAGS:
++ return ARRAY_SIZE(cxgb4_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+@@ -235,6 +241,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+ FW_HDR_FW_VER_MINOR_G(exprom_vers),
+ FW_HDR_FW_VER_MICRO_G(exprom_vers),
+ FW_HDR_FW_VER_BUILD_G(exprom_vers));
++ info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
+ }
+
+ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+@@ -250,6 +257,9 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+ data += sizeof(channel_stats_strings);
+ memcpy(data, loopback_stats_strings,
+ sizeof(loopback_stats_strings));
++ } else if (stringset == ETH_SS_PRIV_FLAGS) {
++ memcpy(data, cxgb4_priv_flags_strings,
++ sizeof(cxgb4_priv_flags_strings));
+ }
+ }
+
+@@ -1499,6 +1509,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev,
+ offset, len, &data[eprom->len - len]);
+ }
+
++static u32 cxgb4_get_priv_flags(struct net_device *netdev)
++{
++ struct port_info *pi = netdev_priv(netdev);
++ struct adapter *adapter = pi->adapter;
++
++ return (adapter->eth_flags | pi->eth_flags);
++}
++
++/**
++ * set_flags - set/unset specified flags if passed in new_flags
++ * @cur_flags: pointer to current flags
++ * @new_flags: new incoming flags
++ * @flags: set of flags to set/unset
++ */
++static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
++{
++ *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
++}
++
++static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
++{
++ struct port_info *pi = netdev_priv(netdev);
++ struct adapter *adapter = pi->adapter;
++
++ set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
++ set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
++
++ return 0;
++}
++
+ static const struct ethtool_ops cxgb_ethtool_ops = {
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
+@@ -1535,6 +1575,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
+ .get_dump_data = get_dump_data,
+ .get_module_info = cxgb4_get_module_info,
+ .get_module_eeprom = cxgb4_get_module_eeprom,
++ .get_priv_flags = cxgb4_get_priv_flags,
++ .set_priv_flags = cxgb4_set_priv_flags,
+ };
+
+ void cxgb4_set_ethtool_ops(struct net_device *netdev)
+--
+2.12.3
+
diff --git a/patches.fixes/0001-cxgb4-cxgb4vf-Add-support-for-SGE-doorbell-queue-tim.patch b/patches.fixes/0001-cxgb4-cxgb4vf-Add-support-for-SGE-doorbell-queue-tim.patch
new file mode 100644
index 0000000000..6f6ba3f612
--- /dev/null
+++ b/patches.fixes/0001-cxgb4-cxgb4vf-Add-support-for-SGE-doorbell-queue-tim.patch
@@ -0,0 +1,874 @@
+From: Vishal Kulkarni <vishal@chelsio.com>
+Patch-mainline: v5.1-rc1
+Git-commit: d429005fdf2c9da19429c8b343eea61bd55b7c00
+Subject: cxgb4/cxgb4vf: Add support for SGE doorbell queue timer
+References: bsc#1127371
+
+T6 introduced a Timer Mechanism in SGE called the
+SGE Doorbell Queue Timer. With this we can now configure
+TX Queues to get CIDX Updates when:
+
+ Time(CIDX == PIDX) >= Timer
+
+Previously we rely on TX Queue Status Page updates by hardware
+for DMA completions. This will make Hardware/Firmware actually
+deliver the CIDX Updates as Ingress Queue messages with
+commensurate Interrupts.
+
+So we now have a new RX Path component for processing CIDX Updates
+and reclaiming TX Descriptors faster.
+
+Original work by: Casey Leedom <leedom@chelsio.com>
+
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Denis Kirjanov <dkirjanov@suse.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 10 +-
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 19 +-
+ drivers/net/ethernet/chelsio/cxgb4/sge.c | 323 ++++++++++++++++++++----
+ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 41 +++
+ drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1 +
+ drivers/net/ethernet/chelsio/cxgb4/t4_values.h | 6 +
+ drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 24 +-
+ drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 27 +-
+ 8 files changed, 376 insertions(+), 75 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index ff093ffe4a67..debd87c706ce 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -583,6 +583,7 @@ enum { /* adapter flags */
+ FW_OFLD_CONN = (1 << 9),
+ ROOT_NO_RELAXED_ORDERING = (1 << 10),
+ SHUTTING_DOWN = (1 << 11),
++ SGE_DBQ_TIMER = (1 << 12),
+ };
+
+ enum {
+@@ -723,6 +724,8 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
+ #ifdef CONFIG_CHELSIO_T4_DCB
+ u8 dcb_prio; /* DCB Priority bound to queue */
+ #endif
++ u8 dbqt; /* SGE Doorbell Queue Timer in use */
++ unsigned int dbqtimerix; /* SGE Doorbell Queue Timer Index */
+ unsigned long tso; /* # of TSO requests */
+ unsigned long tx_cso; /* # of Tx checksum offloads */
+ unsigned long vlan_ins; /* # of Tx VLAN insertions */
+@@ -783,6 +786,7 @@ struct sge {
+ u16 nqs_per_uld; /* # of Rx queues per ULD */
+ u16 timer_val[SGE_NTIMERS];
+ u8 counter_val[SGE_NCOUNTERS];
++ u16 dbqtimer_val[SGE_NDBQTIMERS];
+ u32 fl_pg_order; /* large page allocation size */
+ u32 stat_len; /* length of status page at ring end */
+ u32 pktshift; /* padding between CPL & packet data */
+@@ -1351,7 +1355,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ rspq_flush_handler_t flush_handler, int cong);
+ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *netdevq,
+- unsigned int iqid);
++ unsigned int iqid, u8 dbqt);
+ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int cmplqid);
+@@ -1364,6 +1368,8 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
+ int t4_sge_init(struct adapter *adap);
+ void t4_sge_start(struct adapter *adap);
+ void t4_sge_stop(struct adapter *adap);
++int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q,
++ int maxreclaim);
+ void cxgb4_set_ethtool_ops(struct net_device *netdev);
+ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+ enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
+@@ -1763,6 +1769,8 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
++int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
++ u16 *dbqtimers);
+ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
+ int t4_update_port_info(struct port_info *pi);
+ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 23d5e3424a08..a068d417b52f 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -528,7 +528,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
+ struct sge_eth_txq *eq;
+
+ eq = container_of(txq, struct sge_eth_txq, q);
+- netif_tx_wake_queue(eq->txq);
++ t4_sge_eth_txq_egress_update(q->adap, eq, -1);
+ } else {
+ struct sge_uld_txq *oq;
+
+@@ -887,10 +887,13 @@ static int setup_sge_queues(struct adapter *adap)
+ q->rspq.idx = j;
+ memset(&q->stats, 0, sizeof(q->stats));
+ }
+- for (j = 0; j < pi->nqsets; j++, t++) {
++
++ q = &s->ethrxq[pi->first_qset];
++ for (j = 0; j < pi->nqsets; j++, t++, q++) {
+ err = t4_sge_alloc_eth_txq(adap, t, dev,
+ netdev_get_tx_queue(dev, j),
+- s->fw_evtq.cntxt_id);
++ q->rspq.cntxt_id,
++ !!(adap->flags & SGE_DBQ_TIMER));
+ if (err)
+ goto freeout;
+ }
+@@ -912,7 +915,7 @@ static int setup_sge_queues(struct adapter *adap)
+ if (!is_t4(adap->params.chip)) {
+ err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
+ netdev_get_tx_queue(adap->port[0], 0)
+- , s->fw_evtq.cntxt_id);
++ , s->fw_evtq.cntxt_id, false);
+ if (err)
+ goto freeout;
+ }
+@@ -4227,6 +4230,14 @@ static int adap_init0(struct adapter *adap)
+ if (ret < 0)
+ goto bye;
+
++ /* Grab the SGE Doorbell Queue Timer values. If successful, that
++ * indicates that the Firmware and Hardware support this.
++ */
++ ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(adap->sge.dbqtimer_val),
++ adap->sge.dbqtimer_val);
++ if (!ret)
++ adap->flags |= SGE_DBQ_TIMER;
++
+ if (is_bypass_device(adap->pdev->device))
+ adap->params.bypass = 1;
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index eb4322488190..292fea574f1a 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -78,9 +78,10 @@
+ * Max number of Tx descriptors we clean up at a time. Should be modest as
+ * freeing skbs isn't cheap and it happens while holding locks. We just need
+ * to free packets faster than they arrive, we eventually catch up and keep
+- * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
++ * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should
++ * also match the CIDX Flush Threshold.
+ */
+-#define MAX_TX_RECLAIM 16
++#define MAX_TX_RECLAIM 32
+
+ /*
+ * Max number of Rx buffers we replenish at a time. Again keep this modest,
+@@ -414,29 +415,51 @@ static inline int reclaimable(const struct sge_txq *q)
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @adap: the adapter
+ * @q: the Tx queue to reclaim completed descriptors from
++ * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+- * Reclaims Tx descriptors that the SGE has indicated it has processed,
+- * and frees the associated buffers if possible. Called with the Tx
+- * queue locked.
++ * Reclaims Tx Descriptors that the SGE has indicated it has processed,
++ * and frees the associated buffers if possible. If @max == -1, then
++ * we'll use a defaiult maximum. Called with the TX Queue locked.
+ */
+-static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+- bool unmap)
++static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
++ int maxreclaim, bool unmap)
+ {
+- int avail = reclaimable(q);
++ int reclaim = reclaimable(q);
+
+- if (avail) {
++ if (reclaim) {
+ /*
+ * Limit the amount of clean up work we do at a time to keep
+ * the Tx lock hold time O(1).
+ */
+- if (avail > MAX_TX_RECLAIM)
+- avail = MAX_TX_RECLAIM;
++ if (maxreclaim < 0)
++ maxreclaim = MAX_TX_RECLAIM;
++ if (reclaim > maxreclaim)
++ reclaim = maxreclaim;
+
+- free_tx_desc(adap, q, avail, unmap);
+- q->in_use -= avail;
++ free_tx_desc(adap, q, reclaim, unmap);
++ q->in_use -= reclaim;
+ }
++
++ return reclaim;
++}
++
++/**
++ * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
++ * @adap: the adapter
++ * @q: the Tx queue to reclaim completed descriptors from
++ * @unmap: whether the buffers should be unmapped for DMA
++ *
++ * Reclaims Tx descriptors that the SGE has indicated it has processed,
++ * and frees the associated buffers if possible. Called with the Tx
++ * queue locked.
++ */
++void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
++ bool unmap)
++{
++ (void)reclaim_completed_tx(adap, q, -1, unmap);
+ }
++EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
+
+ static inline int get_buf_size(struct adapter *adapter,
+ const struct rx_sw_desc *d)
+@@ -1295,6 +1318,44 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
+ }
+
+ /**
++ * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
++ * @adap: the adapter
++ * @eq: the Ethernet TX Queue
++ * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
++ *
++ * We're typically called here to update the state of an Ethernet TX
++ * Queue with respect to the hardware's progress in consuming the TX
++ * Work Requests that we've put on that Egress Queue. This happens
++ * when we get Egress Queue Update messages and also prophylactically
++ * in regular timer-based Ethernet TX Queue maintenance.
++ */
++int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
++ int maxreclaim)
++{
++ struct sge_txq *q = &eq->q;
++ unsigned int reclaimed;
++
++ if (!q->in_use || !__netif_tx_trylock(eq->txq))
++ return 0;
++
++ /* Reclaim pending completed TX Descriptors. */
++ reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
++
++ /* If the TX Queue is currently stopped and there's now more than half
++ * the queue available, restart it. Otherwise bail out since the rest
++ * of what we want do here is with the possibility of shipping any
++ * currently buffered Coalesced TX Work Request.
++ */
++ if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
++ netif_tx_wake_queue(eq->txq);
++ eq->q.restarts++;
++ }
++
++ __netif_tx_unlock(eq->txq);
++ return reclaimed;
++}
++
++/**
+ * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
+ * @skb: the packet
+ * @dev: the egress net device
+@@ -1358,7 +1419,7 @@ out_free: dev_kfree_skb_any(skb);
+ }
+ skb_tx_timestamp(skb);
+
+- reclaim_completed_tx(adap, &q->q, true);
++ reclaim_completed_tx(adap, &q->q, -1, true);
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+
+ #ifdef CONFIG_CHELSIO_T4_FCOE
+@@ -1401,8 +1462,25 @@ out_free: dev_kfree_skb_any(skb);
+
+ wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
++ /* After we're done injecting the Work Request for this
++ * packet, we'll be below our "stop threshold" so stop the TX
++ * Queue now and schedule a request for an SGE Egress Queue
++ * Update message. The queue will get started later on when
++ * the firmware processes this Work Request and sends us an
++ * Egress Queue Status Update message indicating that space
++ * has opened up.
++ */
+ eth_txq_stop(q);
+- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
++
++ /* If we're using the SGE Doorbell Queue Timer facility, we
++ * don't need to ask the Firmware to send us Egress Queue CIDX
++ * Updates: the Hardware will do this automatically. And
++ * since we send the Ingress Queue CIDX Updates to the
++ * corresponding Ethernet Response Queue, we'll get them very
++ * quickly.
++ */
++ if (!q->dbqt)
++ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ wr = (void *)&q->q.desc[q->q.pidx];
+@@ -1673,7 +1751,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
+ /* Take this opportunity to reclaim any TX Descriptors whose DMA
+ * transfers have completed.
+ */
+- reclaim_completed_tx(adapter, &txq->q, true);
++ reclaim_completed_tx(adapter, &txq->q, -1, true);
+
+ /* Calculate the number of flits and TX Descriptors we're going to
+ * need along with how many TX Descriptors will be left over after
+@@ -1717,7 +1795,16 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
+ * has opened up.
+ */
+ eth_txq_stop(txq);
+- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
++
++ /* If we're using the SGE Doorbell Queue Timer facility, we
++ * don't need to ask the Firmware to send us Egress Queue CIDX
++ * Updates: the Hardware will do this automatically. And
++ * since we send the Ingress Queue CIDX Updates to the
++ * corresponding Ethernet Response Queue, we'll get them very
++ * quickly.
++ */
++ if (!txq->dbqt)
++ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ /* Start filling in our Work Request. Note that we do _not_ handle
+@@ -2202,7 +2289,7 @@ static void service_ofldq(struct sge_uld_txq *q)
+ */
+ spin_unlock(&q->sendq.lock);
+
+- reclaim_completed_tx(q->adap, &q->q, false);
++ cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
+
+ flits = skb->priority; /* previously saved */
+ ndesc = flits_to_desc(flits);
+@@ -2700,6 +2787,74 @@ static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
+ }
+
+ /**
++ * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
++ * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
++ * @rsp: Response Entry pointer into Response Queue
++ * @gl: Gather List pointer
++ *
++ * For adapters which support the SGE Doorbell Queue Timer facility,
++ * we configure the Ethernet TX Queues to send CIDX Updates to the
++ * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
++ * messages. This adds a small load to PCIe Link RX bandwidth and,
++ * potentially, higher CPU Interrupt load, but allows us to respond
++ * much more quickly to the CIDX Updates. This is important for
++ * Upper Layer Software which isn't willing to have a large amount
++ * of TX Data outstanding before receiving DMA Completions.
++ */
++static void t4_tx_completion_handler(struct sge_rspq *rspq,
++ const __be64 *rsp,
++ const struct pkt_gl *gl)
++{
++ u8 opcode = ((const struct rss_header *)rsp)->opcode;
++ struct port_info *pi = netdev_priv(rspq->netdev);
++ struct adapter *adapter = rspq->adap;
++ struct sge *s = &adapter->sge;
++ struct sge_eth_txq *txq;
++
++ /* skip RSS header */
++ rsp++;
++
++ /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
++ */
++ if (unlikely(opcode == CPL_FW4_MSG &&
++ ((const struct cpl_fw4_msg *)rsp)->type ==
++ FW_TYPE_RSSCPL)) {
++ rsp++;
++ opcode = ((const struct rss_header *)rsp)->opcode;
++ rsp++;
++ }
++
++ if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) {
++ pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
++ __func__, opcode);
++ return;
++ }
++
++ txq = &s->ethtxq[pi->first_qset + rspq->idx];
++
++ /* We've got the Hardware Consumer Index Update in the Egress Update
++ * message. If we're using the SGE Doorbell Queue Timer mechanism,
++ * these Egress Update messages will be our sole CIDX Updates we get
++ * since we don't want to chew up PCIe bandwidth for both Ingress
++ * Messages and Status Page writes. However, The code which manages
++ * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
++ * stored in the Status Page at the end of the TX Queue. It's easiest
++ * to simply copy the CIDX Update value from the Egress Update message
++ * to the Status Page. Also note that no Endian issues need to be
++ * considered here since both are Big Endian and we're just copying
++ * bytes consistently ...
++ */
++ if (txq->dbqt) {
++ struct cpl_sge_egr_update *egr;
++
++ egr = (struct cpl_sge_egr_update *)rsp;
++ WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
++ }
++
++ t4_sge_eth_txq_egress_update(adapter, txq, -1);
++}
++
++/**
+ * t4_ethrx_handler - process an ingress ethernet packet
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the RX_PKT message
+@@ -2722,6 +2877,15 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ struct port_info *pi;
+ int ret = 0;
+
++ /* If we're looking at TX Queue CIDX Update, handle that separately
++ * and return.
++ */
++ if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) ||
++ (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) {
++ t4_tx_completion_handler(q, rsp, si);
++ return 0;
++ }
++
+ if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
+ return handle_trace_pkt(q->adap, si);
+
+@@ -3195,10 +3359,10 @@ static void sge_rx_timer_cb(unsigned long data)
+
+ static void sge_tx_timer_cb(unsigned long data)
+ {
+- unsigned long m;
+- unsigned int i, budget;
+ struct adapter *adap = (struct adapter *)data;
+ struct sge *s = &adap->sge;
++ unsigned long m, period;
++ unsigned int i, budget;
+
+ for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
+ for (m = s->txq_maperr[i]; m; m &= m - 1) {
+@@ -3226,29 +3390,29 @@ static void sge_tx_timer_cb(unsigned long data)
+ budget = MAX_TIMER_TX_RECLAIM;
+ i = s->ethtxq_rover;
+ do {
+- struct sge_eth_txq *q = &s->ethtxq[i];
+-
+- if (q->q.in_use &&
+- time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
+- __netif_tx_trylock(q->txq)) {
+- int avail = reclaimable(&q->q);
+-
+- if (avail) {
+- if (avail > budget)
+- avail = budget;
+-
+- free_tx_desc(adap, &q->q, avail, true);
+- q->q.in_use -= avail;
+- budget -= avail;
+- }
+- __netif_tx_unlock(q->txq);
+- }
++ budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
++ budget);
++ if (!budget)
++ break;
+
+ if (++i >= s->ethqsets)
+ i = 0;
+- } while (budget && i != s->ethtxq_rover);
++ } while (i != s->ethtxq_rover);
+ s->ethtxq_rover = i;
+- mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
++
++ if (budget == 0) {
++ /* If we found too many reclaimable packets schedule a timer
++ * in the near future to continue where we left off.
++ */
++ period = 2;
++ } else {
++ /* We reclaimed all reclaimable TX Descriptors, so reschedule
++ * at the normal period.
++ */
++ period = TX_QCHECK_PERIOD;
++ }
++
++ mod_timer(&s->tx_timer, jiffies + period);
+ }
+
+ /**
+@@ -3325,7 +3489,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
+
+ if (fl) {
+- enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
++ unsigned int chip_ver =
++ CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ /* Allocate the ring for the hardware free list (with space
+ * for its status page) along with the associated software
+@@ -3363,10 +3528,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ * the smaller 64-byte value there).
+ */
+ c.fl0dcaen_to_fl0cidxfthresh =
+- htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
++ htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
+ FETCHBURSTMIN_128B_X :
+- FETCHBURSTMIN_64B_X) |
+- FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
++ FETCHBURSTMIN_64B_T6_X) |
++ FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
+ FETCHBURSTMAX_512B_X :
+ FETCHBURSTMAX_256B_X));
+ c.fl0size = htons(flsz);
+@@ -3488,14 +3653,24 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+ adap->sge.egr_map[id - adap->sge.egr_start] = q;
+ }
+
++/**
++ * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
++ * @adap: the adapter
++ * @txq: the SGE Ethernet TX Queue to initialize
++ * @dev: the Linux Network Device
++ * @netdevq: the corresponding Linux TX Queue
++ * @iqid: the Ingress Queue to which to deliver CIDX Update messages
++ * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
++ */
+ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *netdevq,
+- unsigned int iqid)
++ unsigned int iqid, u8 dbqt)
+ {
+- int ret, nentries;
+- struct fw_eq_eth_cmd c;
+- struct sge *s = &adap->sge;
++ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+ struct port_info *pi = netdev_priv(dev);
++ struct sge *s = &adap->sge;
++ struct fw_eq_eth_cmd c;
++ int ret, nentries;
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+@@ -3514,19 +3689,47 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ FW_EQ_ETH_CMD_VFN_V(0));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
+ FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
+- c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+- FW_EQ_ETH_CMD_VIID_V(pi->viid));
++
++ /* For TX Ethernet Queues using the SGE Doorbell Queue Timer
++ * mechanism, we use Ingress Queue messages for Hardware Consumer
++ * Index Updates on the TX Queue. Otherwise we have the Hardware
++ * write the CIDX Updates into the Status Page at the end of the
++ * TX Queue.
++ */
++ c.autoequiqe_to_viid = htonl((dbqt
++ ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
++ : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
++ FW_EQ_ETH_CMD_VIID_V(pi->viid));
++
+ c.fetchszm_to_iqid =
+- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
++ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
++ ? HOSTFCMODE_INGRESS_QUEUE_X
++ : HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
++
++ /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
+ c.dcaen_to_eqsize =
+- htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
++ htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
++ ? FETCHBURSTMIN_64B_X
++ : FETCHBURSTMIN_64B_T6_X) |
+ FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_ETH_CMD_EQSIZE_V(nentries));
++
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
++ /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
++ * currently configured Timer Index. THis can be changed later via an
++ * ethtool -C tx-usecs {Timer Val} command. Note that the SGE
++ * Doorbell Queue mode is currently automatically enabled in the
++ * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
++ */
++ if (dbqt)
++ c.timeren_timerix =
++ cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
++ FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
++
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret) {
+ kfree(txq->q.sdesc);
+@@ -3543,6 +3746,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ txq->txq = netdevq;
+ txq->tso = txq->tx_cso = txq->vlan_ins = 0;
+ txq->mapping_err = 0;
++ txq->dbqt = dbqt;
++
+ return 0;
+ }
+
+@@ -3550,10 +3755,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int cmplqid)
+ {
+- int ret, nentries;
+- struct fw_eq_ctrl_cmd c;
+- struct sge *s = &adap->sge;
++ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+ struct port_info *pi = netdev_priv(dev);
++ struct sge *s = &adap->sge;
++ struct fw_eq_ctrl_cmd c;
++ int ret, nentries;
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+@@ -3577,7 +3783,9 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+- htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
++ htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
++ ? FETCHBURSTMIN_64B_X
++ : FETCHBURSTMIN_64B_T6_X) |
+ FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
+@@ -3617,6 +3825,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int uld_type)
+ {
++ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+ int ret, nentries;
+ struct fw_eq_ofld_cmd c;
+ struct sge *s = &adap->sge;
+@@ -3647,7 +3856,9 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+ FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+- htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
++ htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
++ ? FETCHBURSTMIN_64B_X
++ : FETCHBURSTMIN_64B_T6_X) |
+ FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 48d0bd15ce0a..e64c1da80d74 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -6641,6 +6641,47 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
+ }
+
+ /**
++ * t4_read_sge_dbqtimers - reag SGE Doorbell Queue Timer values
++ * @adap - the adapter
++ * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
++ * @dbqtimers: SGE Doorbell Queue Timer table
++ *
++ * Reads the SGE Doorbell Queue Timer values into the provided table.
++ * Returns 0 on success (Firmware and Hardware support this feature),
++ * an error on failure.
++ */
++int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
++ u16 *dbqtimers)
++{
++ int ret, dbqtimerix;
++
++ ret = 0;
++ dbqtimerix = 0;
++ while (dbqtimerix < ndbqtimers) {
++ int nparams, param;
++ u32 params[7], vals[7];
++
++ nparams = ndbqtimers - dbqtimerix;
++ if (nparams > ARRAY_SIZE(params))
++ nparams = ARRAY_SIZE(params);
++
++ for (param = 0; param < nparams; param++)
++ params[param] =
++ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
++ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
++ FW_PARAMS_PARAM_Y_V(dbqtimerix + param));
++ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
++ nparams, params, vals);
++ if (ret)
++ break;
++
++ for (param = 0; param < nparams; param++)
++ dbqtimers[dbqtimerix++] = vals[param];
++ }
++ return ret;
++}
++
++/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+index 361d5032c288..002fc62ea726 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+@@ -91,6 +91,7 @@ enum {
+ SGE_CTXT_SIZE = 24, /* size of SGE context */
+ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
+ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
++ SGE_NDBQTIMERS = 8, /* # of Doorbell Queue Timer values */
+ SGE_MAX_IQ_SIZE = 65520,
+
+ SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+index f6558cbfc54e..eb1aa82149db 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+@@ -71,12 +71,18 @@
+ #define FETCHBURSTMIN_64B_X 2
+ #define FETCHBURSTMIN_128B_X 3
+
++/* T6 and later use a single-bit encoding for FetchBurstMin */
++#define FETCHBURSTMIN_64B_T6_X 0
++#define FETCHBURSTMIN_128B_T6_X 1
++
+ #define FETCHBURSTMAX_256B_X 2
+ #define FETCHBURSTMAX_512B_X 3
+
++#define HOSTFCMODE_INGRESS_QUEUE_X 1
+ #define HOSTFCMODE_STATUS_PAGE_X 2
+
+ #define CIDXFLUSHTHRESH_32_X 5
++#define CIDXFLUSHTHRESH_128_X 7
+
+ #define UPDATEDELIVERY_INTERRUPT_X 1
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+index e08e57b498ef..35e8915499f8 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+@@ -1209,6 +1209,8 @@ enum fw_params_param_dev {
+ FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20,
+ FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
+ FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
++ FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
++ FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
+ };
+
+ /*
+@@ -1277,6 +1279,7 @@ enum fw_params_param_dmaq {
+ FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
+ FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
+ FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
++ FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX = 0x15,
+ FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
+ };
+
+@@ -1693,8 +1696,8 @@ struct fw_eq_eth_cmd {
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+- __be32 viid_pkd;
+- __be32 r8_lo;
++ __be32 autoequiqe_to_viid;
++ __be32 timeren_timerix;
+ __be64 r9;
+ };
+
+@@ -1789,6 +1792,10 @@ struct fw_eq_eth_cmd {
+ #define FW_EQ_ETH_CMD_EQSIZE_S 0
+ #define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S)
+
++#define FW_EQ_ETH_CMD_AUTOEQUIQE_S 31
++#define FW_EQ_ETH_CMD_AUTOEQUIQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUIQE_S)
++#define FW_EQ_ETH_CMD_AUTOEQUIQE_F FW_EQ_ETH_CMD_AUTOEQUIQE_V(1U)
++
+ #define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30
+ #define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S)
+ #define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U)
+@@ -1796,6 +1803,19 @@ struct fw_eq_eth_cmd {
+ #define FW_EQ_ETH_CMD_VIID_S 16
+ #define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S)
+
++#define FW_EQ_ETH_CMD_TIMEREN_S 3
++#define FW_EQ_ETH_CMD_TIMEREN_M 0x1
++#define FW_EQ_ETH_CMD_TIMEREN_V(x) ((x) << FW_EQ_ETH_CMD_TIMEREN_S)
++#define FW_EQ_ETH_CMD_TIMEREN_G(x) \
++ (((x) >> FW_EQ_ETH_CMD_TIMEREN_S) & FW_EQ_ETH_CMD_TIMEREN_M)
++#define FW_EQ_ETH_CMD_TIMEREN_F FW_EQ_ETH_CMD_TIMEREN_V(1U)
++
++#define FW_EQ_ETH_CMD_TIMERIX_S 0
++#define FW_EQ_ETH_CMD_TIMERIX_M 0x7
++#define FW_EQ_ETH_CMD_TIMERIX_V(x) ((x) << FW_EQ_ETH_CMD_TIMERIX_S)
++#define FW_EQ_ETH_CMD_TIMERIX_G(x) \
++ (((x) >> FW_EQ_ETH_CMD_TIMERIX_S) & FW_EQ_ETH_CMD_TIMERIX_M)
++
+ struct fw_eq_ctrl_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+index 93994cd7d72d..f26a9a42b0dd 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+@@ -2273,7 +2273,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
+ cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
+
+ if (fl) {
+- enum chip_type chip =
++ unsigned int chip_ver =
+ CHELSIO_CHIP_VERSION(adapter->params.chip);
+ /*
+ * Allocate the ring for the hardware free list (with space
+@@ -2324,10 +2324,10 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
+ */
+ cmd.fl0dcaen_to_fl0cidxfthresh =
+ cpu_to_be16(
+- FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
+- FETCHBURSTMIN_128B_X :
+- FETCHBURSTMIN_64B_X) |
+- FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
++ FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5
++ ? FETCHBURSTMIN_128B_X
++ : FETCHBURSTMIN_64B_T6_X) |
++ FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
+ FETCHBURSTMAX_512B_X :
+ FETCHBURSTMAX_256B_X));
+ cmd.fl0size = cpu_to_be16(flsz);
+@@ -2416,10 +2416,11 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *devq,
+ unsigned int iqid)
+ {
++ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
++ struct port_info *pi = netdev_priv(dev);
++ struct fw_eq_eth_cmd cmd, rpl;
+ struct sge *s = &adapter->sge;
+ int ret, nentries;
+- struct fw_eq_eth_cmd cmd, rpl;
+- struct port_info *pi = netdev_priv(dev);
+
+ /*
+ * Calculate the size of the hardware TX Queue (including the Status
+@@ -2453,17 +2454,19 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
+ cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
+ FW_EQ_ETH_CMD_EQSTART_F |
+ FW_LEN16(cmd));
+- cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+- FW_EQ_ETH_CMD_VIID_V(pi->viid));
++ cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
++ FW_EQ_ETH_CMD_VIID_V(pi->viid));
+ cmd.fetchszm_to_iqid =
+ cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
+ FW_EQ_ETH_CMD_IQID_V(iqid));
+ cmd.dcaen_to_eqsize =
+- cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+- FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
++ cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
++ ? FETCHBURSTMIN_64B_X
++ : FETCHBURSTMIN_64B_T6_X) |
++ FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH_V(
+- SGE_CIDXFLUSHTHRESH_32) |
++ CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+ cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+--
+2.12.3
+
diff --git a/patches.fixes/0001-cxgb4-cxgb4vf-Link-management-changes.patch b/patches.fixes/0001-cxgb4-cxgb4vf-Link-management-changes.patch
new file mode 100644
index 0000000000..51337ef871
--- /dev/null
+++ b/patches.fixes/0001-cxgb4-cxgb4vf-Link-management-changes.patch
@@ -0,0 +1,229 @@
+From: Vishal Kulkarni <vishal@chelsio.com>
+Subject: cxgb4/cxgb4vf: Link management changes
+Patch-mainline: v5.1-rc1
+Git-commit: 95eb7882dfdde9642362586a858734c233335a95
+References: bsc#1127371
+
+1) Speed should be supported by Physical Port Capabilities.
+2) report Forward Error Correction mode which are available.
+3) Added few comments.
+
+Signed-off-by: Casey Leedom <leedom@chelsio.com>
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Denis Kirjanov <dkirjanov@suse.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 24 ++++++-----
+ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 47 ++++++++++++++++++++--
+ drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 6 ++-
+ 3 files changed, 63 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+index d07230c892a5..796043544fc3 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -446,8 +446,10 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
+ unsigned long *link_mode_mask)
+ {
+ #define SET_LMM(__lmm_name) \
+- __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
+- link_mode_mask)
++ do { \
++ __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
++ link_mode_mask); \
++ } while (0)
+
+ #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
+ do { \
+@@ -541,7 +543,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
+ case FW_PORT_TYPE_CR4_QSFP:
+ SET_LMM(FIBRE);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+- FW_CAPS_TO_LMM(SPEED_10G, 10000baseSR_Full);
++ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
+ FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
+ FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
+@@ -552,6 +554,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
+ break;
+ }
+
++ if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
++ FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
++ FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
++ } else {
++ SET_LMM(FEC_NONE);
++ }
++
+ FW_CAPS_TO_LMM(ANEG, Autoneg);
+ FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
+ FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
+@@ -679,18 +688,15 @@ static int set_link_ksettings(struct net_device *dev,
+ base->autoneg == AUTONEG_DISABLE) {
+ fw_caps = speed_to_fw_caps(base->speed);
+
+- /* Must only specify a single speed which must be supported
+- * as part of the Physical Port Capabilities.
+- */
+- if ((fw_caps & (fw_caps - 1)) != 0 ||
+- !(lc->pcaps & fw_caps))
++ /* Speed must be supported by Physical Port Capabilities. */
++ if (!(lc->pcaps & fw_caps))
+ return -EINVAL;
+
+ lc->speed_caps = fw_caps;
+ lc->acaps = fw_caps;
+ } else {
+ fw_caps =
+- lmm_to_fw_caps(link_ksettings->link_modes.advertising);
++ lmm_to_fw_caps(link_ksettings->link_modes.advertising);
+ if (!(lc->pcaps & fw_caps))
+ return -EINVAL;
+ lc->speed_caps = 0;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 2b03f6187a24..c5e5466ee38b 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -4105,6 +4105,9 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
+ * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
++ * @sleep_ok: if true we may sleep while awaiting command completion
++ * @timeout: time to wait for command to finish before timing out
++ * (negative implies @sleep_ok=false)
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+@@ -4124,6 +4127,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
+ int ret;
+
+ fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
++
+ /* Convert driver coding of Pause Frame Flow Control settings into the
+ * Firmware's API.
+ */
+@@ -4143,8 +4147,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
+ fw_fec = cc_to_fwcap_fec(cc_fec);
+
+ /* Figure out what our Requested Port Capabilities are going to be.
++ * Note parallel structure in t4_handle_get_port_info() and
++ * init_link_config().
+ */
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
++ if (lc->autoneg == AUTONEG_ENABLE)
++ return -EINVAL;
++
+ rcap = lc->acaps | fw_fc | fw_fec;
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = cc_fec;
+@@ -4156,7 +4165,11 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
+ rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
+ }
+
+- /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
++ /* Some Requested Port Capabilities are trivially wrong if they exceed
++ * the Physical Port Capabilities. We can check that here and provide
++ * moderately useful feedback in the system log.
++ *
++ * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
+ * we need to exclude this from this check in order to maintain
+ * compatibility ...
+ */
+@@ -4185,6 +4198,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
+
+ ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
+ sleep_ok, timeout);
++
++ /* Unfortunately, even if the Requested Port Capabilities "fit" within
++ * the Physical Port Capabilities, some combinations of features may
++ * still not be leagal. For example, 40Gb/s and Reed-Solomon Forward
++ * Error Correction. So if the Firmware rejects the L1 Configure
++ * request, flag that here.
++ */
+ if (ret) {
+ dev_err(adapter->pdev_dev,
+ "Requested Port Capabilities %#x rejected, error %d\n",
+@@ -8461,6 +8481,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+ fc = fwcap_to_cc_pause(linkattr);
+ speed = fwcap_to_speed(linkattr);
+
++ /* Reset state for communicating new Transceiver Module status and
++ * whether the OS-dependent layer wants us to redo the current
++ * "sticky" L1 Configure Link Parameters.
++ */
+ lc->new_module = false;
+ lc->redo_l1cfg = false;
+
+@@ -8497,9 +8521,15 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+ */
+ pi->port_type = port_type;
+
++ /* Record new Module Type information.
++ */
+ pi->mod_type = mod_type;
+
++ /* Let the OS-dependent layer know if we have a new
++ * Transceiver Module inserted.
++ */
+ lc->new_module = t4_is_inserted_mod_type(mod_type);
++
+ t4_os_portmod_changed(adapter, pi->port_id);
+ }
+
+@@ -8507,8 +8537,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+ fc != lc->fc || fec != lc->fec) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ lc->link_down_rc = linkdnrc;
+- dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
+- pi->tx_chan, t4_link_down_rc_str(linkdnrc));
++ dev_warn_ratelimited(adapter->pdev_dev,
++ "Port %d link down, reason: %s\n",
++ pi->tx_chan,
++ t4_link_down_rc_str(linkdnrc));
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+@@ -8518,6 +8550,11 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+ lc->lpacaps = lpacaps;
+ lc->acaps = acaps & ADVERT_MASK;
+
++ /* If we're not physically capable of Auto-Negotiation, note
++ * this as Auto-Negotiation disabled. Otherwise, we track
++ * what Auto-Negotiation settings we have. Note parallel
++ * structure in t4_link_l1cfg_core() and init_link_config().
++ */
+ if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
+ lc->autoneg = AUTONEG_DISABLE;
+ } else if (lc->acaps & FW_PORT_CAP32_ANEG) {
+@@ -8535,6 +8572,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+ t4_os_link_changed(adapter, pi->port_id, link_ok);
+ }
+
++ /* If we have a new Transceiver Module and the OS-dependent code has
++ * told us that it wants us to redo whatever "sticky" L1 Configuration
++ * Link Parameters are set, do that now.
++ */
+ if (lc->new_module && lc->redo_l1cfg) {
+ struct link_config old_lc;
+ int ret;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+index 5b8c08cf523f..84dff74ca9cd 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+@@ -2005,8 +2005,10 @@ static void t4vf_handle_get_port_info(struct port_info *pi,
+ fc != lc->fc || fec != lc->fec) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ lc->link_down_rc = linkdnrc;
+- dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
+- pi->port_id, t4vf_link_down_rc_str(linkdnrc));
++ dev_warn_ratelimited(adapter->pdev_dev,
++ "Port %d link down, reason: %s\n",
++ pi->port_id,
++ t4vf_link_down_rc_str(linkdnrc));
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+--
+2.12.3
+
diff --git a/patches.fixes/0001-cxgb4-do-not-return-DUPLEX_UNKNOWN-when-link-is-down.patch b/patches.fixes/0001-cxgb4-do-not-return-DUPLEX_UNKNOWN-when-link-is-down.patch
new file mode 100644
index 0000000000..80874aac12
--- /dev/null
+++ b/patches.fixes/0001-cxgb4-do-not-return-DUPLEX_UNKNOWN-when-link-is-down.patch
@@ -0,0 +1,48 @@
+From: Ganesh Goudar <ganeshgr@chelsio.com>
+Subject: cxgb4: do not return DUPLEX_UNKNOWN when link is down
+Patch-mainline: v4.19-rc1
+Git-commit: bc1b50309ce19bb2ccb1340fa83ab56ca6da8f96
+References: bsc#1127371
+
+We were returning DUPLEX_UNKNOWN in get_link_ksettings() when
+the link was down. Unfortunately, this causes a problem when
+"ethtool -s autoneg on" is issued for a link which is down because
+the ethtool code first reads the settings and then reapplies them
+with only the changes provided on the command line. Which results
+in us diving into set_link_ksettings() with DUPLEX_UNKNOWN which is
+not DUPLEX_FULL, so set_link_ksettings() throws an -EINVAL error.
+do not return DUPLEX_UNKNOWN to fix the issue.
+
+Signed-off-by: Casey Leedom <leedom@chelsio.com>
+Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Denis Kirjanov <dkirjanov@suse.com>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+index a14a290a56ee..d07230c892a5 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -628,13 +628,10 @@ static int get_link_ksettings(struct net_device *dev,
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
+ link_ksettings->link_modes.lp_advertising);
+
+- if (netif_carrier_ok(dev)) {
+- base->speed = pi->link_cfg.speed;
+- base->duplex = DUPLEX_FULL;
+- } else {
+- base->speed = SPEED_UNKNOWN;
+- base->duplex = DUPLEX_UNKNOWN;
+- }
++ base->speed = (netif_carrier_ok(dev)
++ ? pi->link_cfg.speed
++ : SPEED_UNKNOWN);
++ base->duplex = DUPLEX_FULL;
+
+ if (pi->link_cfg.fc & PAUSE_RX) {
+ if (pi->link_cfg.fc & PAUSE_TX) {
+--
+2.12.3
+
diff --git a/patches.fixes/0001-futex-Ensure-that-futex-address-is-aligned-in-handle.patch b/patches.fixes/0001-futex-Ensure-that-futex-address-is-aligned-in-handle.patch
new file mode 100644
index 0000000000..9fba700a58
--- /dev/null
+++ b/patches.fixes/0001-futex-Ensure-that-futex-address-is-aligned-in-handle.patch
@@ -0,0 +1,51 @@
+From 9dd1c106b83fa1bbc95eac98713915ffcd1c4875 Mon Sep 17 00:00:00 2001
+From: Chen Jie <chenjie6@huawei.com>
+Date: Thu, 11 Apr 2019 10:29:20 -0700
+Subject: [PATCH] futex: Ensure that futex address is aligned in handle_futex_death()
+Git-commit: 5a07168d8d89b00fe1760120714378175b3ef992
+Patch-mainline: v5.1-rc2
+References: bsc#1050549
+
+The futex code requires that the user space addresses of futexes are 32bit
+aligned. sys_futex() checks this in futex_get_keys() but the robust list
+code has no alignment check in place.
+
+As a consequence the kernel crashes on architectures with strict alignment
+requirements in handle_futex_death() when trying to cmpxchg() on an
+unaligned futex address which was retrieved from the robust list.
+
+[ tglx: Rewrote changelog, proper sizeof() based alignement check and add
+ comment ]
+
+Fixes: 0771dfefc9e5 ("[PATCH] lightweight robust futexes: core")
+Signed-off-by: Chen Jie <chenjie6@huawei.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: <dvhart@infradead.org>
+Cc: <peterz@infradead.org>
+Cc: <zengweilin@huawei.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/1552621478-119787-1-git-send-email-chenjie6@huawei.com
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ kernel/futex.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index ef9a165e4159..280c148acb2a 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3370,6 +3370,10 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
+ {
+ u32 uval, uninitialized_var(nval), mval;
+
++ /* Futex address must be 32bit aligned */
++ if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
++ return -1;
++
+ retry:
+ if (get_user(uval, uaddr))
+ return -1;
+--
+2.16.4
+
diff --git a/patches.fixes/0009-xfs-rewrite-getbmap-using-the-xfs_iext_-helpers.patch b/patches.fixes/0009-xfs-rewrite-getbmap-using-the-xfs_iext_-helpers.patch
index ab207e1630..1312454d5a 100644
--- a/patches.fixes/0009-xfs-rewrite-getbmap-using-the-xfs_iext_-helpers.patch
+++ b/patches.fixes/0009-xfs-rewrite-getbmap-using-the-xfs_iext_-helpers.patch
@@ -34,10 +34,10 @@ Acked-by: Nikolay Borisov <nborisov@suse.com>
1 file changed, 208 insertions(+), 317 deletions(-)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
-index 57379703098a..db951747e356 100644
+index 6503cfa44262..2564b8b33e99 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
-@@ -387,125 +387,103 @@ xfs_bmap_count_blocks(
+@@ -405,125 +405,103 @@ xfs_bmap_count_blocks(
return 0;
}
@@ -176,8 +176,8 @@ index 57379703098a..db951747e356 100644
- agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
- agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
-- error = xfs_reflink_find_shared(mp, agno, agbno, map->br_blockcount,
-- &ebno, &elen, true);
+- error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
+- map->br_blockcount, &ebno, &elen, true);
- if (error)
- return error;
+ bmv->bmv_offset = p->bmv_offset + p->bmv_length;
@@ -248,7 +248,7 @@ index 57379703098a..db951747e356 100644
}
/*
-@@ -522,119 +500,72 @@ xfs_getbmap(
+@@ -540,119 +518,72 @@ xfs_getbmap(
xfs_bmap_format_t formatter, /* format to user */
void *arg) /* formatter arg */
{
@@ -410,7 +410,7 @@ index 57379703098a..db951747e356 100644
case XFS_DATA_FORK:
if (!(iflags & BMV_IF_DELALLOC) &&
(ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
-@@ -652,147 +583,107 @@ xfs_getbmap(
+@@ -670,147 +601,107 @@ xfs_getbmap(
*/
}
@@ -640,5 +640,4 @@ index 57379703098a..db951747e356 100644
error = formatter(&arg, &out[i]);
if (error)
--
-2.7.4
-
+2.16.4
diff --git a/patches.fixes/KEYS-always-initialize-keyring_index_key-desc_len.patch b/patches.fixes/KEYS-always-initialize-keyring_index_key-desc_len.patch
new file mode 100644
index 0000000000..0ebcd2e520
--- /dev/null
+++ b/patches.fixes/KEYS-always-initialize-keyring_index_key-desc_len.patch
@@ -0,0 +1,104 @@
+From ede0fa98a900e657d1fcd80b50920efc896c1a4c Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Fri, 22 Feb 2019 15:36:18 +0000
+Subject: [PATCH] KEYS: always initialize keyring_index_key::desc_len
+Git-commit: ede0fa98a900e657d1fcd80b50920efc896c1a4c
+Patch-mainline: v5.0-rc8
+References: bsc#1051510
+
+syzbot hit the 'BUG_ON(index_key->desc_len == 0);' in __key_link_begin()
+called from construct_alloc_key() during sys_request_key(), because the
+length of the key description was never calculated.
+
+The problem is that we rely on ->desc_len being initialized by
+search_process_keyrings(), specifically by search_nested_keyrings().
+But, if the process isn't subscribed to any keyrings that never happens.
+
+Fix it by always initializing keyring_index_key::desc_len as soon as the
+description is set, like we already do in some places.
+
+The following program reproduces the BUG_ON() when it's run as root and
+no session keyring has been installed. If it doesn't work, try removing
+pam_keyinit.so from /etc/pam.d/login and rebooting.
+
+ #include <stdlib.h>
+ #include <unistd.h>
+ #include <keyutils.h>
+
+ int main(void)
+ {
+ int id = add_key("keyring", "syz", NULL, 0, KEY_SPEC_USER_KEYRING);
+
+ keyctl_setperm(id, KEY_OTH_WRITE);
+ setreuid(5000, 5000);
+ request_key("user", "desc", "", id);
+ }
+
+Reported-by: syzbot+ec24e95ea483de0a24da@syzkaller.appspotmail.com
+Fixes: b2a4df200d57 ("KEYS: Expand the capacity of a keyring")
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: James Morris <james.morris@microsoft.com>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ security/keys/keyring.c | 4 +---
+ security/keys/proc.c | 3 +--
+ security/keys/request_key.c | 1 +
+ security/keys/request_key_auth.c | 2 +-
+ 4 files changed, 4 insertions(+), 6 deletions(-)
+
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -659,9 +659,6 @@ static bool search_nested_keyrings(struc
+ BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
+ (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+
+- if (ctx->index_key.description)
+- ctx->index_key.desc_len = strlen(ctx->index_key.description);
+-
+ /* Check to see if this top-level keyring is what we are looking for
+ * and whether it is valid or not.
+ */
+@@ -919,6 +916,7 @@ key_ref_t keyring_search(key_ref_t keyri
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
++ .index_key.desc_len = strlen(description),
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+--- a/security/keys/proc.c
++++ b/security/keys/proc.c
+@@ -186,8 +186,7 @@ static int proc_keys_show(struct seq_fil
+ int rc;
+
+ struct keyring_search_context ctx = {
+- .index_key.type = key->type,
+- .index_key.description = key->description,
++ .index_key = key->index_key,
+ .cred = current_cred(),
+ .match_data.cmp = lookup_user_key_possessed,
+ .match_data.raw_data = key,
+--- a/security/keys/request_key.c
++++ b/security/keys/request_key.c
+@@ -545,6 +545,7 @@ struct key *request_key_and_link(struct
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
++ .index_key.desc_len = strlen(description),
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+--- a/security/keys/request_key_auth.c
++++ b/security/keys/request_key_auth.c
+@@ -247,7 +247,7 @@ struct key *key_get_instantiation_authke
+ struct key *authkey;
+ key_ref_t authkey_ref;
+
+- sprintf(description, "%x", target_id);
++ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
+
+ authkey_ref = search_process_keyrings(&ctx);
+
diff --git a/patches.fixes/KEYS-user-Align-the-payload-buffer.patch b/patches.fixes/KEYS-user-Align-the-payload-buffer.patch
new file mode 100644
index 0000000000..d9052c84cd
--- /dev/null
+++ b/patches.fixes/KEYS-user-Align-the-payload-buffer.patch
@@ -0,0 +1,47 @@
+From cc1780fc42c76c705dd07ea123f1143dc5057630 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Wed, 20 Feb 2019 13:32:11 +0000
+Subject: [PATCH] KEYS: user: Align the payload buffer
+Git-commit: cc1780fc42c76c705dd07ea123f1143dc5057630
+Patch-mainline: v5.0-rc8
+References: bsc#1051510
+
+Align the payload of "user" and "logon" keys so that users of the
+keyrings service can access it as a struct that requires more than
+2-byte alignment. fscrypt currently does this which results in the read
+of fscrypt_key::size being misaligned as it needs 4-byte alignment.
+
+Align to __alignof__(u64) rather than __alignof__(long) since in the
+future it's conceivable that people would use structs beginning with
+u64, which on some platforms would require more than 'long' alignment.
+
+Reported-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Fixes: 2aa349f6e37c ("[PATCH] Keys: Export user-defined keyring operations")
+Fixes: 88bd6ccdcdd6 ("ext4 crypto: add encryption key management facilities")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Tested-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: James Morris <james.morris@microsoft.com>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ include/keys/user-type.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/keys/user-type.h b/include/keys/user-type.h
+index e098cbe27db5..12babe991594 100644
+--- a/include/keys/user-type.h
++++ b/include/keys/user-type.h
+@@ -31,7 +31,7 @@
+ struct user_key_payload {
+ struct rcu_head rcu; /* RCU destructor */
+ unsigned short datalen; /* length of this data */
+- char data[0]; /* actual data */
++ char data[0] __aligned(__alignof__(u64)); /* actual data */
+ };
+
+ extern struct key_type key_type_user;
+--
+2.16.4
+
diff --git a/patches.fixes/crypto-pcbc-remove-bogus-memcpy-s-with-src-dest.patch b/patches.fixes/crypto-pcbc-remove-bogus-memcpy-s-with-src-dest.patch
new file mode 100644
index 0000000000..1d80b93889
--- /dev/null
+++ b/patches.fixes/crypto-pcbc-remove-bogus-memcpy-s-with-src-dest.patch
@@ -0,0 +1,92 @@
+From 251b7aea34ba3c4d4fdfa9447695642eb8b8b098 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 3 Jan 2019 20:16:13 -0800
+Subject: [PATCH] crypto: pcbc - remove bogus memcpy()s with src == dest
+Git-commit: 251b7aea34ba3c4d4fdfa9447695642eb8b8b098
+Patch-mainline: v5.1-rc1
+References: bsc#1051510
+
+The memcpy()s in the PCBC implementation use walk->iv as both the source
+and destination, which has undefined behavior. These memcpy()'s are
+actually unneeded, because walk->iv is already used to hold the previous
+plaintext block XOR'd with the previous ciphertext block. Thus,
+walk->iv is already updated to its final value.
+
+So remove the broken and unnecessary memcpy()s.
+
+Fixes: 91652be5d1b9 ("[CRYPTO] pcbc: Add Propagated CBC template")
+Cc: <stable@vger.kernel.org> # v2.6.21+
+Cc: David Howells <dhowells@redhat.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ crypto/pcbc.c | 14 ++++----------
+ 1 file changed, 4 insertions(+), 10 deletions(-)
+
+--- a/crypto/pcbc.c
++++ b/crypto/pcbc.c
+@@ -50,7 +50,7 @@ static int crypto_pcbc_encrypt_segment(s
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+- u8 *iv = walk->iv;
++ u8 * const iv = walk->iv;
+
+ do {
+ crypto_xor(iv, src, bsize);
+@@ -72,7 +72,7 @@ static int crypto_pcbc_encrypt_inplace(s
+ int bsize = crypto_cipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+- u8 *iv = walk->iv;
++ u8 * const iv = walk->iv;
+ u8 tmpbuf[bsize];
+
+ do {
+@@ -85,8 +85,6 @@ static int crypto_pcbc_encrypt_inplace(s
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+- memcpy(walk->iv, iv, bsize);
+-
+ return nbytes;
+ }
+
+@@ -122,7 +120,7 @@ static int crypto_pcbc_decrypt_segment(s
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+- u8 *iv = walk->iv;
++ u8 * const iv = walk->iv;
+
+ do {
+ crypto_cipher_decrypt_one(tfm, dst, src);
+@@ -134,8 +132,6 @@ static int crypto_pcbc_decrypt_segment(s
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+- memcpy(walk->iv, iv, bsize);
+-
+ return nbytes;
+ }
+
+@@ -146,7 +142,7 @@ static int crypto_pcbc_decrypt_inplace(s
+ int bsize = crypto_cipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+- u8 *iv = walk->iv;
++ u8 * const iv = walk->iv;
+ u8 tmpbuf[bsize] __aligned(__alignof__(u32));
+
+ do {
+@@ -159,8 +155,6 @@ static int crypto_pcbc_decrypt_inplace(s
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+- memcpy(walk->iv, iv, bsize);
+-
+ return nbytes;
+ }
+
diff --git a/patches.fixes/fs-avoid-fdput-after-failed-fdget-in-vfs_dedupe_file.patch b/patches.fixes/fs-avoid-fdput-after-failed-fdget-in-vfs_dedupe_file.patch
new file mode 100644
index 0000000000..4703b85aa6
--- /dev/null
+++ b/patches.fixes/fs-avoid-fdput-after-failed-fdget-in-vfs_dedupe_file.patch
@@ -0,0 +1,49 @@
+From 22762711479959754e005f5bb8e6abc37bf9e0ba Mon Sep 17 00:00:00 2001
+From: Zev Weiss <zev@bewilderbeest.net>
+Date: Sat, 14 Apr 2018 01:16:58 -0500
+Subject: [PATCH] fs: avoid fdput() after failed fdget() in
+ vfs_dedupe_file_range()
+Git-commit: 22762711479959754e005f5bb8e6abc37bf9e0ba
+Patch-mainline: v4.18-rc1
+References: bsc#1132384, bsc#1132219
+
+It's a fairly inconsequential bug, since fdput() won't actually try to
+fput() the file due to fd.flags (and thus FDPUT_FPUT) being zero in
+the failure case, but most other vfs code takes steps to avoid this.
+
+Signed-off-by: Zev Weiss <zev@bewilderbeest.net>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/read_write.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/fs/read_write.c b/fs/read_write.c
+index c4eabbfc90df..e83bd9744b5d 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -2023,7 +2023,7 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
+ ret = mnt_want_write_file(dst_file);
+ if (ret) {
+ info->status = ret;
+- goto next_loop;
++ goto next_fdput;
+ }
+
+ dst_off = info->dest_offset;
+@@ -2058,9 +2058,9 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
+
+ next_file:
+ mnt_drop_write_file(dst_file);
+-next_loop:
++next_fdput:
+ fdput(dst_fd);
+-
++next_loop:
+ if (fatal_signal_pending(current))
+ goto out;
+ }
+--
+2.16.4
+
diff --git a/patches.fixes/ring-buffer-Check-if-memory-is-available-before-allo.patch b/patches.fixes/ring-buffer-Check-if-memory-is-available-before-allo.patch
new file mode 100644
index 0000000000..a7a2383c63
--- /dev/null
+++ b/patches.fixes/ring-buffer-Check-if-memory-is-available-before-allo.patch
@@ -0,0 +1,72 @@
+From 2a872fa4e9c8adc79c830e4009e1cc0c013a9d8a Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Mon, 2 Apr 2018 10:33:56 -0400
+Subject: [PATCH] ring-buffer: Check if memory is available before allocation
+Git-commit: 2a872fa4e9c8adc79c830e4009e1cc0c013a9d8a
+Patch-mainline: v4.17-rc1
+References: bsc#1132531
+
+The ring buffer is made up of a link list of pages. When making the ring
+buffer bigger, it will allocate all the pages it needs before adding to the
+ring buffer, and if it fails, it frees them and returns an error. This makes
+increasing the ring buffer size an all or nothing action. When this was
+first created, the pages were allocated with "NORETRY". This was to not
+cause any Out-Of-Memory (OOM) actions from allocating the ring buffer. But
+NORETRY was too strict, as the ring buffer would fail to expand even when
+there's memory available, but was taken up in the page cache.
+
+Commit 848618857d253 ("tracing/ring_buffer: Try harder to allocate") changed
+the allocating from NORETRY to RETRY_MAYFAIL. The RETRY_MAYFAIL would
+allocate from the page cache, but if there was no memory available, it would
+simple fail the allocation and not trigger an OOM.
+
+This worked fine, but had one problem. As the ring buffer would allocate one
+page at a time, it could take up all memory in the system before it failed
+to allocate and free that memory. If the allocation is happening and the
+ring buffer allocates all memory and then tries to take more than available,
+its allocation will not trigger an OOM, but if there's any allocation that
+happens someplace else, that could trigger an OOM, even though once the ring
+buffer's allocation fails, it would free up all the previous memory it tried
+to allocate, and allow other memory allocations to succeed.
+
+Commit d02bd27bd33dd ("mm/page_alloc.c: calculate 'available' memory in a
+separate function") separated out si_mem_availble() as a separate function
+that could be used to see how much memory is available in the system. Using
+this function to make sure that the ring buffer could be allocated before it
+tries to allocate pages we can avoid allocating all memory in the system and
+making it vulnerable to OOMs if other allocations are taking place.
+
+Link: http://lkml.kernel.org/r/1522320104-6573-1-git-send-email-zhaoyang.huang@spreadtrum.com
+
+Cc: stable@vger.kernel.org
+Cc: linux-mm@kvack.org
+Fixes: 848618857d253 ("tracing/ring_buffer: Try harder to allocate")
+Requires: d02bd27bd33dd ("mm/page_alloc.c: calculate 'available' memory in a separate function")
+Reported-by: Zhaoyang Huang <huangzhaoyang@gmail.com>
+Tested-by: Joel Fernandes <joelaf@google.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Acked-by: Petr Mladek <pmladek@suse.com>
+
+---
+ kernel/trace/ring_buffer.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 515be03e3009..966128f02121 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1164,6 +1164,11 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+ struct buffer_page *bpage, *tmp;
+ long i;
+
++ /* Check if the available memory is there first */
++ i = si_mem_available();
++ if (i < nr_pages)
++ return -ENOMEM;
++
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page;
+ /*
+--
+2.16.4
+
diff --git a/patches.fixes/tracing-hrtimer-Fix-tracing-bugs-by-taking-all-clock.patch b/patches.fixes/tracing-hrtimer-Fix-tracing-bugs-by-taking-all-clock.patch
new file mode 100644
index 0000000000..74b1590b88
--- /dev/null
+++ b/patches.fixes/tracing-hrtimer-Fix-tracing-bugs-by-taking-all-clock.patch
@@ -0,0 +1,73 @@
+From 91633eed73a3ac37aaece5c8c1f93a18bae616a9 Mon Sep 17 00:00:00 2001
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 21 Dec 2017 11:41:37 +0100
+Subject: [PATCH] tracing/hrtimer: Fix tracing bugs by taking all clock bases
+ and modes into account
+Git-commit: 91633eed73a3ac37aaece5c8c1f93a18bae616a9
+Patch-mainline: v4.16-rc1
+References: bsc#1132527
+
+So far only CLOCK_MONOTONIC and CLOCK_REALTIME were taken into account as
+well as HRTIMER_MODE_ABS/REL in the hrtimer_init tracepoint. The query for
+detecting the ABS or REL timer modes is not valid anymore, it got broken
+by the introduction of HRTIMER_MODE_PINNED.
+
+HRTIMER_MODE_PINNED is not evaluated in the hrtimer_init() call, but for the
+sake of completeness print all given modes.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: John Stultz <john.stultz@linaro.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: keescook@chromium.org
+Link: http://lkml.kernel.org/r/20171221104205.7269-9-anna-maria@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Petr Mladek <pmladek@suse.com>
+
+---
+ include/trace/events/timer.h | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
+index 16e305e69f34..c6f728037c53 100644
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -136,6 +136,20 @@ DEFINE_EVENT(timer_class, timer_cancel,
+ TP_ARGS(timer)
+ );
+
++#define decode_clockid(type) \
++ __print_symbolic(type, \
++ { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
++ { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
++ { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
++ { CLOCK_TAI, "CLOCK_TAI" })
++
++#define decode_hrtimer_mode(mode) \
++ __print_symbolic(mode, \
++ { HRTIMER_MODE_ABS, "ABS" }, \
++ { HRTIMER_MODE_REL, "REL" }, \
++ { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
++ { HRTIMER_MODE_REL_PINNED, "REL|PINNED" })
++
+ /**
+ * hrtimer_init - called when the hrtimer is initialized
+ * @hrtimer: pointer to struct hrtimer
+@@ -162,10 +176,8 @@ TRACE_EVENT(hrtimer_init,
+ ),
+
+ TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
+- __entry->clockid == CLOCK_REALTIME ?
+- "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
+- __entry->mode == HRTIMER_MODE_ABS ?
+- "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
++ decode_clockid(__entry->clockid),
++ decode_hrtimer_mode(__entry->mode))
+ );
+
+ /**
+--
+2.16.4
+
diff --git a/patches.fixes/vfs-Avoid-softlockups-in-drop_pagecache_sb.patch b/patches.fixes/vfs-Avoid-softlockups-in-drop_pagecache_sb.patch
index 2d5366d23d..aafb05650c 100644
--- a/patches.fixes/vfs-Avoid-softlockups-in-drop_pagecache_sb.patch
+++ b/patches.fixes/vfs-Avoid-softlockups-in-drop_pagecache_sb.patch
@@ -2,7 +2,8 @@ From 9a6b7c0ff64945366c05338873e2456b990948e9 Mon Sep 17 00:00:00 2001
From: Jan Kara <jack@suse.cz>
Date: Thu, 6 Dec 2018 17:06:08 +0100
Subject: [PATCH] vfs: Avoid softlockups in drop_pagecache_sb()
-Patch-mainline: Submitted, Jan 14 2019
+Patch-mainline: v5.0-rc5
+Git-commit: c27d82f52f75fc9d8d9d40d120d2a96fdeeada5e
References: bsc#1118505
When superblock has lots of inodes without any pagecache (like is the
diff --git a/patches.fixes/vfs-exit-early-from-zero-length-remap-operations.patch b/patches.fixes/vfs-exit-early-from-zero-length-remap-operations.patch
new file mode 100644
index 0000000000..1cc7592bc2
--- /dev/null
+++ b/patches.fixes/vfs-exit-early-from-zero-length-remap-operations.patch
@@ -0,0 +1,32 @@
+From 2c5773f102c9bb07d5328467f61f0a88f2f2892d Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Tue, 30 Oct 2018 10:40:39 +1100
+Subject: [PATCH] vfs: exit early from zero length remap operations
+Git-commit: 2c5773f102c9bb07d5328467f61f0a88f2f2892d
+Patch-mainline: v4.20-rc1
+References: bsc#1132411, bsc#1132219
+
+If a remap caller asks us to remap to the source file's EOF and the
+source file length leaves us with a zero byte request, exit early.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/read_write.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -1760,6 +1760,8 @@
+ if (pos_in > isize)
+ return -EINVAL;
+ *len = isize - pos_in;
++ if (*len == 0)
++ return 0;
+ }
+
+ /* Ensure offsets don't wrap and the input is inside i_size */
+
diff --git a/patches.fixes/vfs-limit-size-of-dedupe.patch b/patches.fixes/vfs-limit-size-of-dedupe.patch
new file mode 100644
index 0000000000..180173c010
--- /dev/null
+++ b/patches.fixes/vfs-limit-size-of-dedupe.patch
@@ -0,0 +1,33 @@
+From 92b66d2cdd7a4f6f6aa31be5f16a3f0c88902690 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Fri, 6 Jul 2018 23:57:02 +0200
+Subject: [PATCH] vfs: limit size of dedupe
+Git-commit: 92b66d2cdd7a4f6f6aa31be5f16a3f0c88902690
+Patch-mainline: v4.19-rc1
+References: bsc#1132397, bsc#1132219
+
+Suggested-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/read_write.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/read_write.c b/fs/read_write.c
+index 153f8f690490..f43bb12b4759 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -2003,6 +2003,9 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
+ if (off + len > i_size_read(src))
+ return -EINVAL;
+
++ /* Arbitrary 1G limit on a single dedupe request, can be raised. */
++ len = min_t(u64, len, 1 << 30);
++
+ /* pre-format output fields to sane values */
+ for (i = 0; i < count; i++) {
+ same->info[i].bytes_deduped = 0ULL;
+--
+2.16.4
+
diff --git a/patches.fixes/xfs-allow-xfs_lock_two_inodes-to-take-different-EXCL.patch b/patches.fixes/xfs-allow-xfs_lock_two_inodes-to-take-different-EXCL.patch
new file mode 100644
index 0000000000..ff6f9e9242
--- /dev/null
+++ b/patches.fixes/xfs-allow-xfs_lock_two_inodes-to-take-different-EXCL.patch
@@ -0,0 +1,193 @@
+From 7c2d238ac6c435c07780a54719760da2beb46a43 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Fri, 26 Jan 2018 15:27:33 -0800
+Subject: [PATCH] xfs: allow xfs_lock_two_inodes to take different EXCL/SHARED
+ modes
+Git-commit: 7c2d238ac6c435c07780a54719760da2beb46a43
+Patch-mainline: v4.16-rc1
+References: bsc#1132370, bsc#1132219
+
+Refactor xfs_lock_two_inodes to take separate locking modes for each
+inode. Specifically, this enables us to take a SHARED lock on one inode
+and an EXCL lock on the other. The lock class (MMAPLOCK/ILOCK) must be
+the same for each inode.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_bmap_util.c | 4 ++--
+ fs/xfs/xfs_inode.c | 49 ++++++++++++++++++++++++++++++++-----------------
+ fs/xfs/xfs_inode.h | 3 ++-
+ fs/xfs/xfs_reflink.c | 5 +++--
+ 4 files changed, 39 insertions(+), 22 deletions(-)
+
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 6d37ab43195f..c83f549dc17b 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -1872,7 +1872,7 @@ xfs_swap_extents(
+ */
+ lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
+ lock_flags = XFS_MMAPLOCK_EXCL;
+- xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
++ xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
+
+ /* Verify that both files have the same format */
+ if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
+@@ -1919,7 +1919,7 @@ xfs_swap_extents(
+ * Lock and join the inodes to the tansaction so that transaction commit
+ * or cancel will unlock the inodes from this point onwards.
+ */
+- xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
++ xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
+ lock_flags |= XFS_ILOCK_EXCL;
+ xfs_trans_ijoin(tp, ip, 0);
+ xfs_trans_ijoin(tp, tip, 0);
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 5366fb619db6..e7f6d5291a7a 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -546,23 +546,36 @@ xfs_lock_inodes(
+
+ /*
+ * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
+- * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
+- * lock more than one at a time, lockdep will report false positives saying we
+- * have violated locking orders.
++ * the mmaplock or the ilock, but not more than one type at a time. If we lock
++ * more than one at a time, lockdep will report false positives saying we have
++ * violated locking orders. The iolock must be double-locked separately since
++ * we use i_rwsem for that. We now support taking one lock EXCL and the other
++ * SHARED.
+ */
+ void
+ xfs_lock_two_inodes(
+- xfs_inode_t *ip0,
+- xfs_inode_t *ip1,
+- uint lock_mode)
++ struct xfs_inode *ip0,
++ uint ip0_mode,
++ struct xfs_inode *ip1,
++ uint ip1_mode)
+ {
+- xfs_inode_t *temp;
++ struct xfs_inode *temp;
++ uint mode_temp;
+ int attempts = 0;
+ xfs_log_item_t *lp;
+
+- ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
+- if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
+- ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
++ ASSERT(hweight32(ip0_mode) == 1);
++ ASSERT(hweight32(ip1_mode) == 1);
++ ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
++ ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
++ ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
++ !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
++ ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
++ !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
++ ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
++ !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
++ ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
++ !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+
+ ASSERT(ip0->i_ino != ip1->i_ino);
+
+@@ -570,10 +583,13 @@ xfs_lock_two_inodes(
+ temp = ip0;
+ ip0 = ip1;
+ ip1 = temp;
++ mode_temp = ip0_mode;
++ ip0_mode = ip1_mode;
++ ip1_mode = mode_temp;
+ }
+
+ again:
+- xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
++ xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
+
+ /*
+ * If the first lock we have locked is in the AIL, we must TRY to get
+@@ -582,18 +598,17 @@ xfs_lock_two_inodes(
+ */
+ lp = (xfs_log_item_t *)ip0->i_itemp;
+ if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
+- if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
+- xfs_iunlock(ip0, lock_mode);
++ if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
++ xfs_iunlock(ip0, ip0_mode);
+ if ((++attempts % 5) == 0)
+ delay(1); /* Don't just spin the CPU */
+ goto again;
+ }
+ } else {
+- xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
++ xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
+ }
+ }
+
+-
+ void
+ __xfs_iflock(
+ struct xfs_inode *ip)
+@@ -1421,7 +1436,7 @@ xfs_link(
+ if (error)
+ goto std_return;
+
+- xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
++ xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
+
+ xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
+@@ -2585,7 +2600,7 @@ xfs_remove(
+ goto std_return;
+ }
+
+- xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
++ xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
+
+ xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
+index 386b0bb3c92a..3e8dc990d41c 100644
+--- a/fs/xfs/xfs_inode.h
++++ b/fs/xfs/xfs_inode.h
+@@ -423,7 +423,8 @@ void xfs_iunpin_wait(xfs_inode_t *);
+ #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
+
+ int xfs_iflush(struct xfs_inode *, struct xfs_buf **);
+-void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
++void xfs_lock_two_inodes(struct xfs_inode *ip0, uint ip0_mode,
++ struct xfs_inode *ip1, uint ip1_mode);
+
+ xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
+ xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index bac464f0bc59..bcc58c24287c 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -944,7 +944,7 @@ xfs_reflink_set_inode_flag(
+ if (src->i_ino == dest->i_ino)
+ xfs_ilock(src, XFS_ILOCK_EXCL);
+ else
+- xfs_lock_two_inodes(src, dest, XFS_ILOCK_EXCL);
++ xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL);
+
+ if (!xfs_is_reflink_inode(src)) {
+ trace_xfs_reflink_set_inode_flag(src);
+@@ -1324,7 +1324,8 @@ xfs_reflink_remap_range(
+ if (same_inode)
+ xfs_ilock(src, XFS_MMAPLOCK_EXCL);
+ else
+- xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);
++ xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest,
++ XFS_MMAPLOCK_EXCL);
+
+ /* Check file eligibility and prepare for block sharing. */
+ ret = -EINVAL;
+--
+2.16.4
+
diff --git a/patches.fixes/xfs-call-xfs_qm_dqattach-before-performing-reflink-o.patch b/patches.fixes/xfs-call-xfs_qm_dqattach-before-performing-reflink-o.patch
new file mode 100644
index 0000000000..28ae42f293
--- /dev/null
+++ b/patches.fixes/xfs-call-xfs_qm_dqattach-before-performing-reflink-o.patch
@@ -0,0 +1,36 @@
+From 09ac862397041fc484cd7294b15d41073aa78864 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Fri, 19 Jan 2018 08:56:04 -0800
+Subject: [PATCH] xfs: call xfs_qm_dqattach before performing reflink
+ operations
+Git-commit: 09ac862397041fc484cd7294b15d41073aa78864
+Patch-mainline: v4.16-rc1
+References: bsc#1132368, bsc#1132219
+
+Ensure that we've attached all the necessary dquots before performing
+reflink operations so that quota accounting is accurate.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1286,6 +1286,11 @@
+ if (ret <= 0)
+ goto out_unlock;
+
++ /* Attach dquots to dest inode before changing block map */
++ ret = xfs_qm_dqattach(dest, 0);
++ if (ret)
++ goto out_unlock;
++
+ trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
+ /*
+
diff --git a/patches.fixes/xfs-cap-the-length-of-deduplication-requests.patch b/patches.fixes/xfs-cap-the-length-of-deduplication-requests.patch
new file mode 100644
index 0000000000..f0c13059c1
--- /dev/null
+++ b/patches.fixes/xfs-cap-the-length-of-deduplication-requests.patch
@@ -0,0 +1,50 @@
+From 021ba8e98fe5c6691b3cc3669faafa02403aa211 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Mon, 16 Apr 2018 23:07:36 -0700
+Subject: [PATCH] xfs: cap the length of deduplication requests
+Git-commit: 021ba8e98fe5c6691b3cc3669faafa02403aa211
+Patch-mainline: v4.17-rc4
+References: bsc#1132373, bsc#1132219
+
+Since deduplication potentially has to read in all the pages in both
+files in order to compare the contents, cap the deduplication request
+length at MAX_RW_COUNT/2 (roughly 1GB) so that we have /some/ upper bound
+on the request length and can't just lock up the kernel forever. Found
+by running generic/304 after commit 1ddae54555b62 ("common/rc: add
+missing 'local' keywords").
+
+Reported-by: matorola@gmail.com
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_file.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index eed073cc4778..e70fb8ccecea 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -880,8 +880,18 @@ xfs_file_dedupe_range(
+ struct file *dst_file,
+ u64 dst_loff)
+ {
++ struct inode *srci = file_inode(src_file);
++ u64 max_dedupe;
+ int error;
+
++ /*
++ * Since we have to read all these pages in to compare them, cut
++ * it off at MAX_RW_COUNT/2 rounded down to the nearest block.
++ * That means we won't do more than MAX_RW_COUNT IO per request.
++ */
++ max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1);
++ if (len > max_dedupe)
++ len = max_dedupe;
+ error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
+ len, true);
+ if (error)
+--
+2.16.4
+
diff --git a/patches.fixes/xfs-clean-up-xfs_reflink_remap_blocks-call-site.patch b/patches.fixes/xfs-clean-up-xfs_reflink_remap_blocks-call-site.patch
new file mode 100644
index 0000000000..2abda01fb2
--- /dev/null
+++ b/patches.fixes/xfs-clean-up-xfs_reflink_remap_blocks-call-site.patch
@@ -0,0 +1,109 @@
+From 9f04aaffddb3e487f3eda1945f1a9531d6cc7628 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Tue, 30 Oct 2018 10:46:50 +1100
+Subject: [PATCH] xfs: clean up xfs_reflink_remap_blocks call site
+Git-commit: 9f04aaffddb3e487f3eda1945f1a9531d6cc7628
+Patch-mainline: v4.20-rc1
+References: bsc#1132413, bsc#1132219
+
+Move the offset <-> blocks unit conversions into
+xfs_reflink_remap_blocks to make the call site less ugly.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 37 ++++++++++++++++++-------------------
+ 1 file changed, 18 insertions(+), 19 deletions(-)
+
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1180,16 +1180,23 @@
+ STATIC int
+ xfs_reflink_remap_blocks(
+ struct xfs_inode *src,
+- xfs_fileoff_t srcoff,
++ loff_t pos_in,
+ struct xfs_inode *dest,
+- xfs_fileoff_t destoff,
+- xfs_filblks_t len,
+- xfs_off_t new_isize)
++ loff_t pos_out,
++ loff_t remap_len)
+ {
+ struct xfs_bmbt_irec imap;
++ xfs_fileoff_t srcoff;
++ xfs_fileoff_t destoff;
++ xfs_filblks_t len;
++ xfs_filblks_t range_len;
++ xfs_off_t new_isize = pos_out + remap_len;
+ int nimaps;
+ int error = 0;
+- xfs_filblks_t range_len;
++
++ destoff = XFS_B_TO_FSBT(src->i_mount, pos_out);
++ srcoff = XFS_B_TO_FSBT(src->i_mount, pos_in);
++ len = XFS_B_TO_FSB(src->i_mount, remap_len);
+
+ /* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */
+ while (len) {
+@@ -1204,7 +1211,7 @@
+ error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
+ xfs_iunlock(src, lock_mode);
+ if (error)
+- goto err;
++ break;
+ ASSERT(nimaps == 1);
+
+ trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_IO_OVERWRITE,
+@@ -1218,11 +1225,11 @@
+ error = xfs_reflink_remap_extent(dest, &imap, destoff,
+ new_isize);
+ if (error)
+- goto err;
++ break;
+
+ if (fatal_signal_pending(current)) {
+ error = -EINTR;
+- goto err;
++ break;
+ }
+
+ /* Advance drange/srange */
+@@ -1231,10 +1238,8 @@
+ len -= range_len;
+ }
+
+- return 0;
+-
+-err:
+- trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
++ if (error)
++ trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
+ return error;
+ }
+
+@@ -1480,8 +1485,6 @@
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
+ struct xfs_mount *mp = src->i_mount;
+- xfs_fileoff_t sfsbno, dfsbno;
+- xfs_filblks_t fsblen;
+ xfs_extlen_t cowextsize;
+ ssize_t ret;
+
+@@ -1499,11 +1502,7 @@
+
+ trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
+- dfsbno = XFS_B_TO_FSBT(mp, pos_out);
+- sfsbno = XFS_B_TO_FSBT(mp, pos_in);
+- fsblen = XFS_B_TO_FSB(mp, len);
+- ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
+- pos_out + len);
++ ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len);
+ if (ret)
+ goto out_unlock;
+
diff --git a/patches.fixes/xfs-fix-data-corruption-w-unaligned-dedupe-ranges.patch b/patches.fixes/xfs-fix-data-corruption-w-unaligned-dedupe-ranges.patch
new file mode 100644
index 0000000000..b976644392
--- /dev/null
+++ b/patches.fixes/xfs-fix-data-corruption-w-unaligned-dedupe-ranges.patch
@@ -0,0 +1,70 @@
+From dceeb47b0ed65e14de53507a8a9c32a90831cfa1 Mon Sep 17 00:00:00 2001
+From: Dave Chinner <dchinner@redhat.com>
+Date: Sat, 6 Oct 2018 11:44:19 +1000
+Subject: [PATCH] xfs: fix data corruption w/ unaligned dedupe ranges
+Git-commit: dceeb47b0ed65e14de53507a8a9c32a90831cfa1
+Patch-mainline: v4.19-rc8
+References: bsc#1132405, bsc#1132219
+
+A deduplication data corruption is Exposed by fstests generic/505 on
+XFS. It is caused by extending the block match range to include the
+partial EOF block, but then allowing unknown data beyond EOF to be
+considered a "match" to data in the destination file because the
+comparison is only made to the end of the source file. This corrupts
+the destination file when the source extent is shared with it.
+
+XFS only supports whole block dedupe, but we still need to appear to
+support whole file dedupe correctly. Hence if the dedupe request
+includes the last block of the souce file, don't include it in the
+actual XFS dedupe operation. If the rest of the range dedupes
+successfully, then report the partial last block as deduped, too, so
+that userspace sees it as a successful dedupe rather than return
+EINVAL because we can't dedupe unaligned blocks.
+
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1307,6 +1307,19 @@
+ * will have the iolock and mmaplock held, the page cache of the out file
+ * will be truncated, and any leases on the out file will have been broken.
+ * This function borrows heavily from xfs_file_aio_write_checks.
++ *
++ * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't
++ * checked that the bytes beyond EOF physically match. Hence we cannot use the
++ * EOF block in the source dedupe range because it's not a complete block match,
++ * hence can introduce a corruption into the file that has it's
++ * block replaced.
++ *
++ * Despite this issue, we still need to report that range as successfully
++ * deduped to avoid confusing userspace with EINVAL errors on completely
++ * matching file data. The only time that an unaligned length will be passed to
++ * us is when it spans the EOF block of the source file, so if we simply mask it
++ * down to be block aligned here the we will dedupe everything but that partial
++ * EOF block.
+ */
+ STATIC int
+ xfs_reflink_remap_prep(
+@@ -1349,6 +1362,14 @@
+ if (ret <= 0)
+ goto out_unlock;
+
++ /*
++ * If the dedupe data matches, chop off the partial EOF block
++ * from the source file so we don't try to dedupe the partial
++ * EOF block.
++ */
++ if (is_dedupe)
++ *len &= ~((u64)i_blocksize(inode_in) - 1);
++
+ /* Attach dquots to dest inode before changing block map */
+ ret = xfs_qm_dqattach(dest, 0);
+ if (ret)
+
diff --git a/patches.fixes/xfs-fix-data-corruption-w-unaligned-reflink-ranges.patch b/patches.fixes/xfs-fix-data-corruption-w-unaligned-reflink-ranges.patch
new file mode 100644
index 0000000000..95abe10bad
--- /dev/null
+++ b/patches.fixes/xfs-fix-data-corruption-w-unaligned-reflink-ranges.patch
@@ -0,0 +1,109 @@
+From b39989009bdb84992915c9869f58094ed5becf10 Mon Sep 17 00:00:00 2001
+From: Dave Chinner <david@fromorbit.com>
+Date: Sat, 6 Oct 2018 11:44:39 +1000
+Subject: [PATCH] xfs: fix data corruption w/ unaligned reflink ranges
+Git-commit: b39989009bdb84992915c9869f58094ed5becf10
+Patch-mainline: v4.19-rc8
+References: bsc#1132407, bsc#1132219
+
+When reflinking sub-file ranges, a data corruption can occur when
+the source file range includes a partial EOF block. This shares the
+unknown data beyond EOF into the second file at a position inside
+EOF, exposing stale data in the second file.
+
+XFS only supports whole block sharing, but we still need to
+support whole file reflink correctly. Hence if the reflink
+request includes the last block of the souce file, only proceed with
+the reflink operation if it lands at or past the destination file's
+current EOF. If it lands within the destination file EOF, reject the
+entire request with -EINVAL and make the caller go the hard way.
+
+This avoids the data corruption vector, but also avoids disruption
+of returning EINVAL to userspace for the common case of whole file
+cloning.
+
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 47 ++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 34 insertions(+), 13 deletions(-)
+
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1304,22 +1304,32 @@
+
+ /*
+ * Prepare two files for range cloning. Upon a successful return both inodes
+- * will have the iolock and mmaplock held, the page cache of the out file
+- * will be truncated, and any leases on the out file will have been broken.
+- * This function borrows heavily from xfs_file_aio_write_checks.
++ * will have the iolock and mmaplock held, the page cache of the out file will
++ * be truncated, and any leases on the out file will have been broken. This
++ * function borrows heavily from xfs_file_aio_write_checks.
+ *
+ * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't
+ * checked that the bytes beyond EOF physically match. Hence we cannot use the
+ * EOF block in the source dedupe range because it's not a complete block match,
+- * hence can introduce a corruption into the file that has it's
+- * block replaced.
++ * hence can introduce a corruption into the file that has it's block replaced.
+ *
+- * Despite this issue, we still need to report that range as successfully
+- * deduped to avoid confusing userspace with EINVAL errors on completely
+- * matching file data. The only time that an unaligned length will be passed to
+- * us is when it spans the EOF block of the source file, so if we simply mask it
+- * down to be block aligned here the we will dedupe everything but that partial
+- * EOF block.
++ * In similar fashion, the VFS file cloning also allows partial EOF blocks to be
++ * "block aligned" for the purposes of cloning entire files. However, if the
++ * source file range includes the EOF block and it lands within the existing EOF
++ * of the destination file, then we can expose stale data from beyond the source
++ * file EOF in the destination file.
++ *
++ * XFS doesn't support partial block sharing, so in both cases we have check
++ * these cases ourselves. For dedupe, we can simply round the length to dedupe
++ * down to the previous whole block and ignore the partial EOF block. While this
++ * means we can't dedupe the last block of a file, this is an acceptible
++ * tradeoff for simplicity on implementation.
++ *
++ * For cloning, we want to share the partial EOF block if it is also the new EOF
++ * block of the destination file. If the partial EOF block lies inside the
++ * existing destination EOF, then we have to abort the clone to avoid exposing
++ * stale data in the destination file. Hence we reject these clone attempts with
++ * -EINVAL in this case.
+ */
+ STATIC int
+ xfs_reflink_remap_prep(
+@@ -1335,6 +1345,7 @@
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
+ bool same_inode = (inode_in == inode_out);
++ u64 blkmask = i_blocksize(inode_in) - 1;
+ ssize_t ret;
+
+ /* Lock both files against IO */
+@@ -1367,8 +1378,18 @@
+ * from the source file so we don't try to dedupe the partial
+ * EOF block.
+ */
+- if (is_dedupe)
+- *len &= ~((u64)i_blocksize(inode_in) - 1);
++ if (is_dedupe) {
++ *len &= ~blkmask;
++ } else if (*len & blkmask) {
++ /*
++ * The user is attempting to share a partial EOF block,
++ * if it's inside the destination EOF then reject it.
++ */
++ if (pos_out + *len < i_size_read(inode_out)) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++ }
+
+ /* Attach dquots to dest inode before changing block map */
+ ret = xfs_qm_dqattach(dest, 0);
+
diff --git a/patches.fixes/xfs-fix-pagecache-truncation-prior-to-reflink.patch b/patches.fixes/xfs-fix-pagecache-truncation-prior-to-reflink.patch
new file mode 100644
index 0000000000..c3ec0c32e0
--- /dev/null
+++ b/patches.fixes/xfs-fix-pagecache-truncation-prior-to-reflink.patch
@@ -0,0 +1,40 @@
+From 4918ef4ea008cd2ff47eb852894e3f9b9047f4f3 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Tue, 30 Oct 2018 10:46:33 +1100
+Subject: [PATCH] xfs: fix pagecache truncation prior to reflink
+Git-commit: 4918ef4ea008cd2ff47eb852894e3f9b9047f4f3
+Patch-mainline: v4.20-rc1
+References: bsc#1132412, bsc#1132219
+
+Prior to remapping blocks, it is necessary to remove pages from the
+destination file's page cache. Unfortunately, the truncation is not
+aggressive enough -- if page size > block size, we'll end up zeroing
+subpage blocks instead of removing them. So, round the start offset
+down and the end offset up to page boundaries. We already wrote all
+the dirty data so the larger range shouldn't be a problem.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1432,8 +1432,9 @@
+ goto out_unlock;
+
+ /* Zap any page cache for the destination file's range. */
+- truncate_inode_pages_range(&inode_out->i_data, pos_out,
+- PAGE_ALIGN(pos_out + *len) - 1);
++ truncate_inode_pages_range(&inode_out->i_data,
++ round_down(pos_out, PAGE_SIZE),
++ round_up(pos_out + *len, PAGE_SIZE) - 1);
+
+ /* If we're altering the file contents... */
+ if (!is_dedupe) {
+
diff --git a/patches.fixes/xfs-flush-removing-page-cache-in-xfs_reflink_remap_p.patch b/patches.fixes/xfs-flush-removing-page-cache-in-xfs_reflink_remap_p.patch
new file mode 100644
index 0000000000..c52df06fab
--- /dev/null
+++ b/patches.fixes/xfs-flush-removing-page-cache-in-xfs_reflink_remap_p.patch
@@ -0,0 +1,94 @@
+From 2c307174ab77e34645e75e12827646e044d273c3 Mon Sep 17 00:00:00 2001
+From: Dave Chinner <dchinner@redhat.com>
+Date: Mon, 19 Nov 2018 13:31:10 -0800
+Subject: [PATCH] xfs: flush removing page cache in xfs_reflink_remap_prep
+Git-commit: 2c307174ab77e34645e75e12827646e044d273c3
+Patch-mainline: v4.20-rc4
+References: bsc#1132414, bsc#1132219
+
+On a sub-page block size filesystem, fsx is failing with a data
+corruption after a series of operations involving copying a file
+with the destination offset beyond EOF of the destination of the file:
+
+8093(157 mod 256): TRUNCATE DOWN from 0x7a120 to 0x50000 ******WWWW
+8094(158 mod 256): INSERT 0x25000 thru 0x25fff (0x1000 bytes)
+8095(159 mod 256): COPY 0x18000 thru 0x1afff (0x3000 bytes) to 0x2f400
+8096(160 mod 256): WRITE 0x5da00 thru 0x651ff (0x7800 bytes) HOLE
+8097(161 mod 256): COPY 0x2000 thru 0x5fff (0x4000 bytes) to 0x6fc00
+
+The second copy here is beyond EOF, and it is to sub-page (4k) but
+block aligned (1k) offset. The clone runs the EOF zeroing, landing
+in a pre-existing post-eof delalloc extent. This zeroes the post-eof
+extents in the page cache just fine, dirtying the pages correctly.
+
+The problem is that xfs_reflink_remap_prep() now truncates the page
+cache over the range that it is copying it to, and rounds that down
+to cover the entire start page. This removes the dirty page over the
+delalloc extent from the page cache without having written it back.
+Hence later, when the page cache is flushed, the page at offset
+0x6f000 has not been written back and hence exposes stale data,
+which fsx trips over less than 10 operations later.
+
+Fix this by changing xfs_reflink_remap_prep() to use
+xfs_flush_unmap_range().
+
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_bmap_util.c | 2 +-
+ fs/xfs/xfs_bmap_util.h | 3 +++
+ fs/xfs/xfs_reflink.c | 17 +++++++++++++----
+ 3 files changed, 17 insertions(+), 5 deletions(-)
+
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -1112,7 +1112,7 @@
+ return 0;
+ }
+
+-static int
++int
+ xfs_flush_unmap_range(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+--- a/fs/xfs/xfs_bmap_util.h
++++ b/fs/xfs/xfs_bmap_util.h
+@@ -87,4 +87,7 @@
+
+ xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
+ xfs_extnum_t xfs_bmap_count_leaves(struct xfs_ifork *ifp, xfs_filblks_t *count);
++
++int xfs_flush_unmap_range(struct xfs_inode *ip, xfs_off_t offset,
++ xfs_off_t len);
+ #endif /* __XFS_BMAP_UTIL_H__ */
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1436,10 +1436,19 @@
+ if (ret)
+ goto out_unlock;
+
+- /* Zap any page cache for the destination file's range. */
+- truncate_inode_pages_range(&inode_out->i_data,
+- round_down(pos_out, PAGE_SIZE),
+- round_up(pos_out + *len, PAGE_SIZE) - 1);
++ /*
++ * If pos_out > EOF, we may have dirtied blocks between EOF and
++ * pos_out. In that case, we need to extend the flush and unmap to cover
++ * from EOF to the end of the copy length.
++ */
++ if (pos_out > XFS_ISIZE(dest)) {
++ loff_t flen = *len + (pos_out - XFS_ISIZE(dest));
++ ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
++ } else {
++ ret = xfs_flush_unmap_range(dest, pos_out, *len);
++ }
++ if (ret)
++ goto out_unlock;
+
+ /* If we're altering the file contents... */
+ if (!is_dedupe) {
+
diff --git a/patches.fixes/xfs-only-grab-shared-inode-locks-for-source-file-dur.patch b/patches.fixes/xfs-only-grab-shared-inode-locks-for-source-file-dur.patch
new file mode 100644
index 0000000000..598fcc45cb
--- /dev/null
+++ b/patches.fixes/xfs-only-grab-shared-inode-locks-for-source-file-dur.patch
@@ -0,0 +1,122 @@
+From 01c2e13dcae9757ea987af8933f9fcc6e33f2d7c Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Thu, 18 Jan 2018 14:07:53 -0800
+Subject: [PATCH] xfs: only grab shared inode locks for source file during
+ reflink
+Git-commit: 01c2e13dcae9757ea987af8933f9fcc6e33f2d7c
+Patch-mainline: v4.16-rc1
+References: bsc#1132372, bsc#1132219
+
+Reflink and dedupe operations remap blocks from a source file into a
+destination file. The destination file needs exclusive locks on all
+levels because we're updating its block map, but the source file isn't
+undergoing any block map changes so we can use a shared lock.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 25 +++++++++++++++----------
+ include/linux/fs.h | 5 +++++
+ 2 files changed, 20 insertions(+), 10 deletions(-)
+
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index bcc58c24287c..85a119e1463b 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1202,13 +1202,16 @@ xfs_reflink_remap_blocks(
+
+ /* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */
+ while (len) {
++ uint lock_mode;
++
+ trace_xfs_reflink_remap_blocks_loop(src, srcoff, len,
+ dest, destoff);
++
+ /* Read extent from the source file */
+ nimaps = 1;
+- xfs_ilock(src, XFS_ILOCK_EXCL);
++ lock_mode = xfs_ilock_data_map_shared(src);
+ error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
+- xfs_iunlock(src, XFS_ILOCK_EXCL);
++ xfs_iunlock(src, lock_mode);
+ if (error)
+ goto err;
+ ASSERT(nimaps == 1);
+@@ -1260,7 +1263,7 @@ xfs_iolock_two_inodes_and_break_layout(
+
+ retry:
+ if (src < dest) {
+- inode_lock(src);
++ inode_lock_shared(src);
+ inode_lock_nested(dest, I_MUTEX_NONDIR2);
+ } else {
+ /* src >= dest */
+@@ -1271,7 +1274,7 @@ xfs_iolock_two_inodes_and_break_layout(
+ if (error == -EWOULDBLOCK) {
+ inode_unlock(dest);
+ if (src < dest)
+- inode_unlock(src);
++ inode_unlock_shared(src);
+ error = break_layout(dest, true);
+ if (error)
+ return error;
+@@ -1280,11 +1283,11 @@ xfs_iolock_two_inodes_and_break_layout(
+ if (error) {
+ inode_unlock(dest);
+ if (src < dest)
+- inode_unlock(src);
++ inode_unlock_shared(src);
+ return error;
+ }
+ if (src > dest)
+- inode_lock_nested(src, I_MUTEX_NONDIR2);
++ inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
+ return 0;
+ }
+
+@@ -1324,7 +1327,7 @@ xfs_reflink_remap_range(
+ if (same_inode)
+ xfs_ilock(src, XFS_MMAPLOCK_EXCL);
+ else
+- xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest,
++ xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest,
+ XFS_MMAPLOCK_EXCL);
+
+ /* Check file eligibility and prepare for block sharing. */
+@@ -1393,10 +1396,12 @@ xfs_reflink_remap_range(
+ is_dedupe);
+
+ out_unlock:
+- xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
++ xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
++ if (!same_inode)
++ xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
++ inode_unlock(inode_out);
+ if (!same_inode)
+- xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
+- unlock_two_nondirectories(inode_in, inode_out);
++ inode_unlock_shared(inode_in);
+ if (ret)
+ trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
+ return ret;
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 7f8d96d68f34..5cbeab8a63ca 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -748,6 +748,11 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
+ down_write_nested(&inode->i_rwsem, subclass);
+ }
+
++static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
++{
++ down_read_nested(&inode->i_rwsem, subclass);
++}
++
+ void lock_two_nondirectories(struct inode *, struct inode*);
+ void unlock_two_nondirectories(struct inode *, struct inode*);
+
+--
+2.16.4
+
diff --git a/patches.fixes/xfs-refactor-clonerange-preparation-into-a-separate-.patch b/patches.fixes/xfs-refactor-clonerange-preparation-into-a-separate-.patch
new file mode 100644
index 0000000000..2548502c32
--- /dev/null
+++ b/patches.fixes/xfs-refactor-clonerange-preparation-into-a-separate-.patch
@@ -0,0 +1,192 @@
+From 0d41e1d28c2e969094ef7933b8521f1e08d30251 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Fri, 5 Oct 2018 19:04:22 +1000
+Subject: [PATCH] xfs: refactor clonerange preparation into a separate helper
+Git-commit: 0d41e1d28c2e969094ef7933b8521f1e08d30251
+Patch-mainline: v4.19-rc8
+References: bsc#1132402, bsc#1132219
+
+Refactor all the reflink preparation steps into a separate helper
+that we'll use to land all the upcoming fixes for insufficient input
+checks.
+
+This rework also moves the invalidation of the destination range to
+the prep function so that it is done before the range is remapped.
+This ensures that nobody can access the data in range being remapped
+until the remap is complete.
+
+[dgc: fix xfs_reflink_remap_prep() return value and caller check to
+handle vfs_clone_file_prep_inodes() returning 0 to mean "nothing to
+do". ]
+
+[dgc: make sure length changed by vfs_clone_file_prep_inodes() gets
+propagated back to XFS code that does the remapping. ]
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 100 +++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 73 insertions(+), 27 deletions(-)
+
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1282,35 +1282,47 @@
+ return 0;
+ }
+
++/* Unlock both inodes after they've been prepped for a range clone. */
++STATIC void
++xfs_reflink_remap_unlock(
++ struct file *file_in,
++ struct file *file_out)
++{
++ struct inode *inode_in = file_inode(file_in);
++ struct xfs_inode *src = XFS_I(inode_in);
++ struct inode *inode_out = file_inode(file_out);
++ struct xfs_inode *dest = XFS_I(inode_out);
++ bool same_inode = (inode_in == inode_out);
++
++ xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
++ if (!same_inode)
++ xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
++ inode_unlock(inode_out);
++ if (!same_inode)
++ inode_unlock_shared(inode_in);
++}
++
+ /*
+- * Link a range of blocks from one file to another.
++ * Prepare two files for range cloning. Upon a successful return both inodes
++ * will have the iolock and mmaplock held, the page cache of the out file
++ * will be truncated, and any leases on the out file will have been broken.
+ */
+-int
+-xfs_reflink_remap_range(
++STATIC int
++xfs_reflink_remap_prep(
+ struct file *file_in,
+ loff_t pos_in,
+ struct file *file_out,
+ loff_t pos_out,
+- u64 len,
++ u64 *len,
+ bool is_dedupe)
+ {
+ struct inode *inode_in = file_inode(file_in);
+ struct xfs_inode *src = XFS_I(inode_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
+- struct xfs_mount *mp = src->i_mount;
+ bool same_inode = (inode_in == inode_out);
+- xfs_fileoff_t sfsbno, dfsbno;
+- xfs_filblks_t fsblen;
+- xfs_extlen_t cowextsize;
+ ssize_t ret;
+
+- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+- return -EOPNOTSUPP;
+-
+- if (XFS_FORCED_SHUTDOWN(mp))
+- return -EIO;
+-
+ /* Lock both files against IO */
+ ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
+ if (ret)
+@@ -1332,7 +1344,7 @@
+ goto out_unlock;
+
+ ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
+- &len, is_dedupe);
++ len, is_dedupe);
+ if (ret <= 0)
+ goto out_unlock;
+
+@@ -1341,8 +1353,6 @@
+ if (ret)
+ goto out_unlock;
+
+- trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+-
+ /*
+ * Clear out post-eof preallocations because we don't have page cache
+ * backing the delayed allocations and they'll never get freed on
+@@ -1359,6 +1369,51 @@
+ if (ret)
+ goto out_unlock;
+
++ /* Zap any page cache for the destination file's range. */
++ truncate_inode_pages_range(&inode_out->i_data, pos_out,
++ PAGE_ALIGN(pos_out + *len) - 1);
++ return 1;
++out_unlock:
++ xfs_reflink_remap_unlock(file_in, file_out);
++ return ret;
++}
++
++/*
++ * Link a range of blocks from one file to another.
++ */
++int
++xfs_reflink_remap_range(
++ struct file *file_in,
++ loff_t pos_in,
++ struct file *file_out,
++ loff_t pos_out,
++ u64 len,
++ bool is_dedupe)
++{
++ struct inode *inode_in = file_inode(file_in);
++ struct xfs_inode *src = XFS_I(inode_in);
++ struct inode *inode_out = file_inode(file_out);
++ struct xfs_inode *dest = XFS_I(inode_out);
++ struct xfs_mount *mp = src->i_mount;
++ xfs_fileoff_t sfsbno, dfsbno;
++ xfs_filblks_t fsblen;
++ xfs_extlen_t cowextsize;
++ ssize_t ret;
++
++ if (!xfs_sb_version_hasreflink(&mp->m_sb))
++ return -EOPNOTSUPP;
++
++ if (XFS_FORCED_SHUTDOWN(mp))
++ return -EIO;
++
++ /* Prepare and then clone file data. */
++ ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
++ &len, is_dedupe);
++ if (ret <= 0)
++ return ret;
++
++ trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
++
+ dfsbno = XFS_B_TO_FSBT(mp, pos_out);
+ sfsbno = XFS_B_TO_FSBT(mp, pos_in);
+ fsblen = XFS_B_TO_FSB(mp, len);
+@@ -1367,10 +1422,6 @@
+ if (ret)
+ goto out_unlock;
+
+- /* Zap any page cache for the destination file's range. */
+- truncate_inode_pages_range(&inode_out->i_data, pos_out,
+- PAGE_ALIGN(pos_out + len) - 1);
+-
+ /*
+ * Carry the cowextsize hint from src to dest if we're sharing the
+ * entire source file to the entire destination file, the source file
+@@ -1387,12 +1438,7 @@
+ is_dedupe);
+
+ out_unlock:
+- xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
+- if (!same_inode)
+- xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+- inode_unlock(inode_out);
+- if (!same_inode)
+- inode_unlock_shared(inode_in);
++ xfs_reflink_remap_unlock(file_in, file_out);
+ if (ret)
+ trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
+ return ret;
diff --git a/patches.fixes/xfs-reflink-find-shared-should-take-a-transaction.patch b/patches.fixes/xfs-reflink-find-shared-should-take-a-transaction.patch
new file mode 100644
index 0000000000..0b3532dab7
--- /dev/null
+++ b/patches.fixes/xfs-reflink-find-shared-should-take-a-transaction.patch
@@ -0,0 +1,121 @@
+From 92ff7285f1df5590d53ab7031f0b86552b59121d Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Fri, 16 Jun 2017 11:00:10 -0700
+Subject: [PATCH] xfs: reflink find shared should take a transaction
+Git-commit: 92ff7285f1df5590d53ab7031f0b86552b59121d
+Patch-mainline: v4.13-rc1
+References: bsc#1132226, bsc#1132219
+
+Adapt _reflink_find_shared to take an optional transaction pointer. The
+inode scrubber code will need to decide (within transaction context) if
+a file has shared blocks. To avoid buffer deadlocks, we must pass the
+tp through to this function's utility calls.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_bmap_util.c | 4 ++--
+ fs/xfs/xfs_reflink.c | 15 ++++++++-------
+ fs/xfs/xfs_reflink.h | 6 +++---
+ 3 files changed, 13 insertions(+), 12 deletions(-)
+
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 0ea70a44c1a7..863180c41858 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -455,8 +455,8 @@ xfs_getbmap_adjust_shared(
+
+ agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
+ agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
+- error = xfs_reflink_find_shared(mp, agno, agbno, map->br_blockcount,
+- &ebno, &elen, true);
++ error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
++ map->br_blockcount, &ebno, &elen, true);
+ if (error)
+ return error;
+
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index ffe6fe7a7eb5..e25c9953a7c9 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -155,6 +155,7 @@
+ int
+ xfs_reflink_find_shared(
+ struct xfs_mount *mp,
++ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t agbno,
+ xfs_extlen_t aglen,
+@@ -166,18 +167,18 @@ xfs_reflink_find_shared(
+ struct xfs_btree_cur *cur;
+ int error;
+
+- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
++ error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+ if (error)
+ return error;
+
+- cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
++ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
+
+ error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
+ find_end_of_shared);
+
+ xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+
+- xfs_buf_relse(agbp);
++ xfs_trans_brelse(tp, agbp);
+ return error;
+ }
+
+@@ -217,7 +218,7 @@ xfs_reflink_trim_around_shared(
+ agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
+ aglen = irec->br_blockcount;
+
+- error = xfs_reflink_find_shared(ip->i_mount, agno, agbno,
++ error = xfs_reflink_find_shared(ip->i_mount, NULL, agno, agbno,
+ aglen, &fbno, &flen, true);
+ if (error)
+ return error;
+@@ -1373,8 +1374,8 @@ xfs_reflink_dirty_extents(
+ agbno = XFS_FSB_TO_AGBNO(mp, map[1].br_startblock);
+ aglen = map[1].br_blockcount;
+
+- error = xfs_reflink_find_shared(mp, agno, agbno, aglen,
+- &rbno, &rlen, true);
++ error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
++ aglen, &rbno, &rlen, true);
+ if (error)
+ goto out;
+ if (rbno == NULLAGBLOCK)
+@@ -1445,7 +1446,7 @@ xfs_reflink_clear_inode_flag(
+ agbno = XFS_FSB_TO_AGBNO(mp, map.br_startblock);
+ aglen = map.br_blockcount;
+
+- error = xfs_reflink_find_shared(mp, agno, agbno, aglen,
++ error = xfs_reflink_find_shared(mp, *tpp, agno, agbno, aglen,
+ &rbno, &rlen, false);
+ if (error)
+ return error;
+diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
+index d29a7967f029..b8cc5c3e2724 100644
+--- a/fs/xfs/xfs_reflink.h
++++ b/fs/xfs/xfs_reflink.h
+@@ -20,9 +20,9 @@
+ #ifndef __XFS_REFLINK_H
+ #define __XFS_REFLINK_H 1
+
+-extern int xfs_reflink_find_shared(struct xfs_mount *mp, xfs_agnumber_t agno,
+- xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno,
+- xfs_extlen_t *flen, bool find_maximal);
++extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp,
++ xfs_agnumber_t agno, xfs_agblock_t agbno, xfs_extlen_t aglen,
++ xfs_agblock_t *fbno, xfs_extlen_t *flen, bool find_maximal);
+ extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
+ struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
+
+--
+2.16.4
+
diff --git a/patches.fixes/xfs-reflink-should-break-pnfs-leases-before-sharing-.patch b/patches.fixes/xfs-reflink-should-break-pnfs-leases-before-sharing-.patch
new file mode 100644
index 0000000000..2faa033df6
--- /dev/null
+++ b/patches.fixes/xfs-reflink-should-break-pnfs-leases-before-sharing-.patch
@@ -0,0 +1,91 @@
+From 1364b1d4b5df31a05b6a3e6fdeb5371dbd4bd8ac Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Thu, 18 Jan 2018 13:55:20 -0800
+Subject: [PATCH] xfs: reflink should break pnfs leases before sharing blocks
+Git-commit: 1364b1d4b5df31a05b6a3e6fdeb5371dbd4bd8ac
+Patch-mainline: v4.16-rc1
+References: bsc#1132369, bsc#1132219
+
+Before we share blocks between files, we need to break the pnfs leases
+on the layout before we start slicing and dicing the block map. The
+structure of this function sets us up for the lock contention reduction
+in the next patch.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 47 insertions(+), 1 deletion(-)
+
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index bcc2ad4f0899..bac464f0bc59 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1244,6 +1244,50 @@ xfs_reflink_remap_blocks(
+ return error;
+ }
+
++/*
++ * Grab the exclusive iolock for a data copy from src to dest, making
++ * sure to abide vfs locking order (lowest pointer value goes first) and
++ * breaking the pnfs layout leases on dest before proceeding. The loop
++ * is needed because we cannot call the blocking break_layout() with the
++ * src iolock held, and therefore have to back out both locks.
++ */
++static int
++xfs_iolock_two_inodes_and_break_layout(
++ struct inode *src,
++ struct inode *dest)
++{
++ int error;
++
++retry:
++ if (src < dest) {
++ inode_lock(src);
++ inode_lock_nested(dest, I_MUTEX_NONDIR2);
++ } else {
++ /* src >= dest */
++ inode_lock(dest);
++ }
++
++ error = break_layout(dest, false);
++ if (error == -EWOULDBLOCK) {
++ inode_unlock(dest);
++ if (src < dest)
++ inode_unlock(src);
++ error = break_layout(dest, true);
++ if (error)
++ return error;
++ goto retry;
++ }
++ if (error) {
++ inode_unlock(dest);
++ if (src < dest)
++ inode_unlock(src);
++ return error;
++ }
++ if (src > dest)
++ inode_lock_nested(src, I_MUTEX_NONDIR2);
++ return 0;
++}
++
+ /*
+ * Link a range of blocks from one file to another.
+ */
+@@ -1274,7 +1318,9 @@ xfs_reflink_remap_range(
+ return -EIO;
+
+ /* Lock both files against IO */
+- lock_two_nondirectories(inode_in, inode_out);
++ ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
++ if (ret)
++ return ret;
+ if (same_inode)
+ xfs_ilock(src, XFS_MMAPLOCK_EXCL);
+ else
+--
+2.16.4
+
diff --git a/patches.fixes/xfs-remove-dest-file-s-post-eof-preallocations-befor.patch b/patches.fixes/xfs-remove-dest-file-s-post-eof-preallocations-befor.patch
new file mode 100644
index 0000000000..8b610c71eb
--- /dev/null
+++ b/patches.fixes/xfs-remove-dest-file-s-post-eof-preallocations-befor.patch
@@ -0,0 +1,54 @@
+From 5c989a0ee06eb77a44baffd1779a5dbb9a7e873f Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Sun, 10 Dec 2017 18:03:54 -0800
+Subject: [PATCH] xfs: remove dest file's post-eof preallocations before
+ reflinking
+Git-commit: 5c989a0ee06eb77a44baffd1779a5dbb9a7e873f
+Patch-mainline: v4.15-rc5
+References: bsc#1132365, bsc#1132219
+
+If we try to reflink into a file with post-eof preallocations at an
+offset well past the preallocations, we increase i_size as one would
+expect. However, those allocations do not have page cache backing them,
+so they won't get cleaned out on their own. This leads to asserts in
+the collapse/insert range code and xfs_destroy_inode when they encounter
+delalloc extents they weren't expecting to find.
+
+Since there are plenty of other places where we dump those post-eof
+blocks, do the same to the reflink destination file before we start
+remapping extents. This was found by adding clonerange support to
+fsstress and running it in write-only mode.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index cf7c8f81bebb..e13f5ad57a03 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1291,6 +1291,17 @@ xfs_reflink_remap_range(
+
+ trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
++ /*
++ * Clear out post-eof preallocations because we don't have page cache
++ * backing the delayed allocations and they'll never get freed on
++ * their own.
++ */
++ if (xfs_can_free_eofblocks(dest, true)) {
++ ret = xfs_free_eofblocks(dest);
++ if (ret)
++ goto out_unlock;
++ }
++
+ /* Set flags and remap blocks. */
+ ret = xfs_reflink_set_inode_flag(src, dest);
+ if (ret)
+--
+2.16.4
+
diff --git a/patches.fixes/xfs-update-ctime-and-remove-suid-before-cloning-file.patch b/patches.fixes/xfs-update-ctime-and-remove-suid-before-cloning-file.patch
new file mode 100644
index 0000000000..8330e448f9
--- /dev/null
+++ b/patches.fixes/xfs-update-ctime-and-remove-suid-before-cloning-file.patch
@@ -0,0 +1,66 @@
+From 7debbf015f580693680f3d2a3cef0cf99dcef688 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Fri, 5 Oct 2018 19:05:41 +1000
+Subject: [PATCH] xfs: update ctime and remove suid before cloning files
+Git-commit: 7debbf015f580693680f3d2a3cef0cf99dcef688
+Patch-mainline: v4.19-rc8
+References: bsc#1132404, bsc#1132219
+
+Before cloning into a file, update the ctime and remove sensitive
+attributes like suid, just like we'd do for a regular file write.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index f135748d8282..59da9708e9c1 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1264,6 +1264,7 @@ xfs_reflink_zero_posteof(
+ * Prepare two files for range cloning. Upon a successful return both inodes
+ * will have the iolock and mmaplock held, the page cache of the out file
+ * will be truncated, and any leases on the out file will have been broken.
++ * This function borrows heavily from xfs_file_aio_write_checks.
+ */
+ STATIC int
+ xfs_reflink_remap_prep(
+@@ -1327,6 +1328,30 @@ xfs_reflink_remap_prep(
+ /* Zap any page cache for the destination file's range. */
+ truncate_inode_pages_range(&inode_out->i_data, pos_out,
+ PAGE_ALIGN(pos_out + *len) - 1);
++
++ /* If we're altering the file contents... */
++ if (!is_dedupe) {
++ /*
++ * ...update the timestamps (which will grab the ilock again
++ * from xfs_fs_dirty_inode, so we have to call it before we
++ * take the ilock).
++ */
++ if (!(file_out->f_mode & FMODE_NOCMTIME)) {
++ ret = file_update_time(file_out);
++ if (ret)
++ goto out_unlock;
++ }
++
++ /*
++ * ...clear the security bits if the process is not being run
++ * by root. This keeps people from modifying setuid and setgid
++ * binaries.
++ */
++ ret = file_remove_privs(file_out);
++ if (ret)
++ goto out_unlock;
++ }
++
+ return 1;
+ out_unlock:
+ xfs_reflink_remap_unlock(file_in, file_out);
+--
+2.16.4
+
diff --git a/patches.fixes/xfs-zero-posteof-blocks-when-cloning-above-eof.patch b/patches.fixes/xfs-zero-posteof-blocks-when-cloning-above-eof.patch
new file mode 100644
index 0000000000..99a4039e89
--- /dev/null
+++ b/patches.fixes/xfs-zero-posteof-blocks-when-cloning-above-eof.patch
@@ -0,0 +1,78 @@
+From 410fdc72b05afabef3afb51167085799dcc7b3cf Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Fri, 5 Oct 2018 19:04:27 +1000
+Subject: [PATCH] xfs: zero posteof blocks when cloning above eof
+Git-commit: 410fdc72b05afabef3afb51167085799dcc7b3cf
+Patch-mainline: v4.19-rc8
+References: bsc#1132403, bsc#1132219
+
+When we're reflinking between two files and the destination file range
+is well beyond the destination file's EOF marker, zero any posteof
+speculative preallocations in the destination file so that we don't
+expose stale disk contents. The previous strategy of trying to clear
+the preallocations does not work if the destination file has the
+PREALLOC flag set.
+
+Uncovered by shared/010.
+
+Reported-by: Zorro Lang <zlang@redhat.com>
+Bugzilla-id: https://bugzilla.kernel.org/show_bug.cgi?id=201259
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Acked-by: Anthony Iliopoulos <ailiopoulos@suse.com>
+
+---
+ fs/xfs/xfs_reflink.c | 33 +++++++++++++++++++++++++--------
+ 1 file changed, 25 insertions(+), 8 deletions(-)
+
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1303,6 +1303,26 @@
+ }
+
+ /*
++ * If we're reflinking to a point past the destination file's EOF, we must
++ * zero any speculative post-EOF preallocations that sit between the old EOF
++ * and the destination file offset.
++ */
++static int
++xfs_reflink_zero_posteof(
++ struct xfs_inode *ip,
++ loff_t pos)
++{
++ loff_t isize = i_size_read(VFS_I(ip));
++
++ if (pos <= isize)
++ return 0;
++
++ trace_xfs_zero_eof(ip, isize, pos - isize);
++ return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
++ &xfs_iomap_ops);
++}
++
++/*
+ * Prepare two files for range cloning. Upon a successful return both inodes
+ * will have the iolock and mmaplock held, the page cache of the out file
+ * will be truncated, and any leases on the out file will have been broken.
+@@ -1354,15 +1374,12 @@
+ goto out_unlock;
+
+ /*
+- * Clear out post-eof preallocations because we don't have page cache
+- * backing the delayed allocations and they'll never get freed on
+- * their own.
++ * Zero existing post-eof speculative preallocations in the destination
++ * file.
+ */
+- if (xfs_can_free_eofblocks(dest, true)) {
+- ret = xfs_free_eofblocks(dest);
+- if (ret)
+- goto out_unlock;
+- }
++ ret = xfs_reflink_zero_posteof(dest, pos_out);
++ if (ret)
++ goto out_unlock;
+
+ /* Set flags and remap blocks. */
+ ret = xfs_reflink_set_inode_flag(src, dest);
diff --git a/patches.kabi/kabi-restore-icmp_send.patch b/patches.kabi/kabi-restore-icmp_send.patch
new file mode 100644
index 0000000000..1de76deaf1
--- /dev/null
+++ b/patches.kabi/kabi-restore-icmp_send.patch
@@ -0,0 +1,55 @@
+From: Jiri Slaby <jslaby@suse.cz>
+Subject: kABI: restore icmp_send
+Patch-mainline: never, kabi
+References: kabi
+
+In networking-stable-19_03_07, upstream commit
+9ef6b42ad6fd7929dd1b6092cb02014e382c6a91 (net: Add __icmp_send helper.)
+renamed icmp_send function to __icmp_send and introduced an inline
+helper icmp_send. It made the kABI checker to complain.
+
+Change the inline into a proper function and export it again.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/net/icmp.h | 7 +------
+ net/ipv4/icmp.c | 5 +++++
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/include/net/icmp.h
++++ b/include/net/icmp.h
+@@ -22,7 +22,6 @@
+
+ #include <net/inet_sock.h>
+ #include <net/snmp.h>
+-#include <net/ip.h>
+
+ struct icmp_err {
+ int errno;
+@@ -42,11 +41,7 @@ struct net;
+
+ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ const struct ip_options *opt);
+-static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+-{
+- __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+-}
+-
++void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+ int icmp_rcv(struct sk_buff *skb);
+ void icmp_err(struct sk_buff *skb, u32 info);
+ int icmp_init(void);
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -750,6 +750,11 @@ out:;
+ }
+ EXPORT_SYMBOL(__icmp_send);
+
++void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++{
++ __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
++}
++EXPORT_SYMBOL(icmp_send);
+
+ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
+ {
diff --git a/patches.suse/0001-x86-tsc-Force-inlining-of-cyc2ns-bits.patch b/patches.suse/0001-x86-tsc-Force-inlining-of-cyc2ns-bits.patch
new file mode 100644
index 0000000000..e2e2a3479a
--- /dev/null
+++ b/patches.suse/0001-x86-tsc-Force-inlining-of-cyc2ns-bits.patch
@@ -0,0 +1,61 @@
+From 6ca9241226a64f35f86696e8b3a523aa5955750f Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu, 11 Apr 2019 10:43:16 -0700
+Subject: [PATCH] x86/tsc: Force inlining of cyc2ns bits
+Git-commit: 4907c68abd3f60f650f98d5a69d4ec77c0bde44f
+Patch-mainline: v4.10
+References: bsc#1052904
+
+Looking at the asm for native_sched_clock() I noticed we don't inline
+enough. Mostly caused by sharing code with cyc2ns_read_begin(), which
+we didn't used to do. So mark all that __force_inline to make it DTRT.
+
+Fixes: 59eaef78bfea ("x86/tsc: Remodel cyc2ns to use seqcount_latch()")
+Reported-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: hpa@zytor.com
+Cc: eric.dumazet@gmail.com
+Cc: bp@alien8.de
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181011104019.695196158@infradead.org
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ arch/x86/kernel/tsc.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 1f7776c1cbb3..a3633285a2e5 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -60,7 +60,7 @@ struct cyc2ns {
+
+ static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
+
+-void cyc2ns_read_begin(struct cyc2ns_data *data)
++void __always_inline cyc2ns_read_begin(struct cyc2ns_data *data)
+ {
+ int seq, idx;
+
+@@ -77,7 +77,7 @@ void cyc2ns_read_begin(struct cyc2ns_data *data)
+ } while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
+ }
+
+-void cyc2ns_read_end(void)
++void __always_inline cyc2ns_read_end(void)
+ {
+ preempt_enable_notrace();
+ }
+@@ -123,7 +123,7 @@ static void cyc2ns_init(int cpu)
+ seqcount_init(&c2n->seq);
+ }
+
+-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
++static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
+ {
+ struct cyc2ns_data data;
+ unsigned long long ns;
+--
+2.16.4
+
diff --git a/patches.suse/bnxt_en-Drop-oversize-TX-packets-to-prevent-errors.patch b/patches.suse/bnxt_en-Drop-oversize-TX-packets-to-prevent-errors.patch
new file mode 100644
index 0000000000..c62adf3669
--- /dev/null
+++ b/patches.suse/bnxt_en-Drop-oversize-TX-packets-to-prevent-errors.patch
@@ -0,0 +1,43 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 27 Feb 2019 03:58:53 -0500
+Subject: bnxt_en: Drop oversize TX packets to prevent errors.
+Git-commit: 2b3c6885386020b1b9d92d45e8349637e27d1f66
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+There have been reports of oversize UDP packets being sent to the
+driver to be transmitted, causing error conditions. The issue is
+likely caused by the dst of the SKB switching between 'lo' with
+64K MTU and the hardware device with a smaller MTU. Patches are
+being proposed by Mahesh Bandewar <maheshb@google.com> to fix the
+issue.
+
+In the meantime, add a quick length check in the driver to prevent
+the error. The driver uses the TX packet size as index to look up an
+array to setup the TX BD. The array is large enough to support all MTU
+sizes supported by the driver. The oversize TX packet causes the
+driver to index beyond the array and put garbage values into the
+TX BD. Add a simple check to prevent this.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -460,6 +460,12 @@ normal_tx:
+ }
+
+ length >>= 9;
++ if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
++ dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
++ skb->len);
++ i = 0;
++ goto tx_dma_error;
++ }
+ flags |= bnxt_lhint_arr[length];
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+
diff --git a/patches.suse/do-not-default-to-ibrs-on-skl.patch b/patches.suse/do-not-default-to-ibrs-on-skl.patch
new file mode 100644
index 0000000000..db22ce5356
--- /dev/null
+++ b/patches.suse/do-not-default-to-ibrs-on-skl.patch
@@ -0,0 +1,54 @@
+From: Jiri Kosina <jkosina@suse.cz>
+Subject: x86/bugs: do not default to IBRS even on SKL
+Patch-mainline: never, SUSE specific
+References: bsc#1068032 CVE-2017-5753 bsc#1112824
+
+Disable SUSE-specific IBRS-on-SKL implementation. Please refer to page 16 of
+
+https://software.intel.com/security-software-guidance/api-app/sites/default/files/Retpoline-A-Branch-Target-Injection-Mitigation.pdf
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -587,23 +587,6 @@
+ return cmd;
+ }
+
+-/* Check for Skylake-like CPUs (for RSB and IBRS handling) */
+-static bool __init is_skylake_era(void)
+-{
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+- boot_cpu_data.x86 == 6) {
+- switch (boot_cpu_data.x86_model) {
+- case INTEL_FAM6_SKYLAKE_MOBILE:
+- case INTEL_FAM6_SKYLAKE_DESKTOP:
+- case INTEL_FAM6_SKYLAKE_X:
+- case INTEL_FAM6_KABYLAKE_MOBILE:
+- case INTEL_FAM6_KABYLAKE_DESKTOP:
+- return true;
+- }
+- }
+- return false;
+-}
+-
+ static void __init spectre_v2_select_mitigation(void)
+ {
+ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -644,16 +627,6 @@
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ goto specv2_set_mode;
+ }
+-
+- /*
+- * If we have IBRS support, and either Skylake or !RETPOLINE,
+- * then that's what we do.
+- */
+- if (boot_cpu_has(X86_FEATURE_IBRS) && is_skylake_era()) {
+- mode = SPECTRE_V2_IBRS;
+- setup_force_cpu_cap(X86_FEATURE_USE_IBRS);
+- goto specv2_set_mode;
+- }
+ /* fall through */
+ case SPECTRE_V2_CMD_RETPOLINE:
+ if (IS_ENABLED(CONFIG_RETPOLINE))
diff --git a/patches.suse/dsa-mv88e6xxx-Ensure-all-pending-interrupts-are-hand.patch b/patches.suse/dsa-mv88e6xxx-Ensure-all-pending-interrupts-are-hand.patch
new file mode 100644
index 0000000000..bd9687e146
--- /dev/null
+++ b/patches.suse/dsa-mv88e6xxx-Ensure-all-pending-interrupts-are-hand.patch
@@ -0,0 +1,89 @@
+From: John David Anglin <dave.anglin@bell.net>
+Date: Mon, 11 Feb 2019 13:40:21 -0500
+Subject: dsa: mv88e6xxx: Ensure all pending interrupts are handled prior to
+ exit
+Git-commit: 7c0db24cc431e2196d98a5d5ddaa9088e2fcbfe5
+Patch-mainline: v5.0-rc7
+References: networking-stable-19_02_20
+
+The GPIO interrupt controller on the espressobin board only supports edge interrupts.
+If one enables the use of hardware interrupts in the device tree for the 88E6341, it is
+possible to miss an edge. When this happens, the INTn pin on the Marvell switch is
+stuck low and no further interrupts occur.
+
+I found after adding debug statements to mv88e6xxx_g1_irq_thread_work() that there is
+a race in handling device interrupts (e.g. PHY link interrupts). Some interrupts are
+directly cleared by reading the Global 1 status register. However, the device interrupt
+flag, for example, is not cleared until all the unmasked SERDES and PHY ports are serviced.
+This is done by reading the relevant SERDES and PHY status register.
+
+The code only services interrupts whose status bit is set at the time of reading its status
+register. If an interrupt event occurs after its status is read and before all interrupts
+are serviced, then this event will not be serviced and the INTn output pin will remain low.
+
+This is not a problem with polling or level interrupts since the handler will be called
+again to process the event. However, it's a big problem when using level interrupts.
+
+The fix presented here is to add a loop around the code servicing switch interrupts. If
+any pending interrupts remain after the current set has been handled, we loop and process
+the new set. If there are no pending interrupts after servicing, we are sure that INTn has
+gone high and we will get an edge when a new event occurs.
+
+Tested on espressobin board.
+
+[js] use the old names of the macros.
+
+Fixes: dc30c35be720 ("net: dsa: mv88e6xxx: Implement interrupt support.")
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Tested-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/dsa/mv88e6xxx/chip.c | 28 ++++++++++++++++++++++------
+ 1 file changed, 22 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -371,6 +371,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thre
+ unsigned int sub_irq;
+ unsigned int n;
+ u16 reg;
++ u16 ctl1;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+@@ -380,13 +381,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thre
+ if (err)
+ goto out;
+
+- for (n = 0; n < chip->g1_irq.nirqs; ++n) {
+- if (reg & (1 << n)) {
+- sub_irq = irq_find_mapping(chip->g1_irq.domain, n);
+- handle_nested_irq(sub_irq);
+- ++nhandled;
++ do {
++ for (n = 0; n < chip->g1_irq.nirqs; ++n) {
++ if (reg & (1 << n)) {
++ sub_irq = irq_find_mapping(chip->g1_irq.domain,
++ n);
++ handle_nested_irq(sub_irq);
++ ++nhandled;
++ }
+ }
+- }
++
++ mutex_lock(&chip->reg_lock);
++ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &ctl1);
++ if (err)
++ goto unlock;
++ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &reg);
++unlock:
++ mutex_unlock(&chip->reg_lock);
++ if (err)
++ goto out;
++ ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
++ } while (reg & ctl1);
++
+ out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+ }
diff --git a/patches.suse/hv_netvsc-Fix-IP-header-checksum-for-coalesced-packe.patch b/patches.suse/hv_netvsc-Fix-IP-header-checksum-for-coalesced-packe.patch
new file mode 100644
index 0000000000..29074a12f8
--- /dev/null
+++ b/patches.suse/hv_netvsc-Fix-IP-header-checksum-for-coalesced-packe.patch
@@ -0,0 +1,57 @@
+From: Haiyang Zhang <haiyangz@microsoft.com>
+Date: Fri, 22 Feb 2019 18:25:03 +0000
+Subject: hv_netvsc: Fix IP header checksum for coalesced packets
+Git-commit: bf48648d650db1146b75b9bd358502431e86cf4f
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+Incoming packets may have IP header checksum verified by the host.
+They may not have IP header checksum computed after coalescing.
+This patch re-compute the checksum when necessary, otherwise the
+packets may be dropped, because Linux network stack always checks it.
+
+Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/hyperv/netvsc_drv.c | 22 +++++++++++++++++++---
+ 1 file changed, 19 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -743,6 +743,14 @@ void netvsc_linkstatus_callback(struct n
+ schedule_delayed_work(&ndev_ctx->dwork, 0);
+ }
+
++static void netvsc_comp_ipcsum(struct sk_buff *skb)
++{
++ struct iphdr *iph = (struct iphdr *)skb->data;
++
++ iph->check = 0;
++ iph->check = ip_fast_csum(iph, iph->ihl);
++}
++
+ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+ struct napi_struct *napi,
+ const struct ndis_tcp_ip_checksum_info *csum_info,
+@@ -766,9 +774,17 @@ static struct sk_buff *netvsc_alloc_recv
+ /* skb is already created with CHECKSUM_NONE */
+ skb_checksum_none_assert(skb);
+
+- /*
+- * In Linux, the IP checksum is always checked.
+- * Do L4 checksum offload if enabled and present.
++ /* Incoming packets may have IP header checksum verified by the host.
++ * They may not have IP header checksum computed after coalescing.
++ * We compute it here if the flags are set, because on Linux, the IP
++ * checksum is always checked.
++ */
++ if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
++ csum_info->receive.ip_checksum_succeeded &&
++ skb->protocol == htons(ETH_P_IP))
++ netvsc_comp_ipcsum(skb);
++
++ /* Do L4 checksum offload if enabled and present.
+ */
+ if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if (csum_info->receive.tcp_checksum_succeeded ||
diff --git a/patches.suse/ipv4-Return-error-for-RTA_VIA-attribute.patch b/patches.suse/ipv4-Return-error-for-RTA_VIA-attribute.patch
new file mode 100644
index 0000000000..726565e427
--- /dev/null
+++ b/patches.suse/ipv4-Return-error-for-RTA_VIA-attribute.patch
@@ -0,0 +1,43 @@
+From: David Ahern <dsahern@gmail.com>
+Date: Tue, 26 Feb 2019 09:00:02 -0800
+Subject: ipv4: Return error for RTA_VIA attribute
+Git-commit: b6e9e5df4ecf100f6a10ab2ade8e46d47a4b9779
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+IPv4 currently does not support nexthops outside of the AF_INET family.
+Specifically, it does not handle RTA_VIA attribute. If it is passed
+in a route add request, the actual route added only uses the device
+which is clearly not what the user intended:
+
+ $ ip ro add 172.16.1.0/24 via inet6 2001:db8:1::1 dev eth0
+ $ ip ro ls
+ ...
+ 172.16.1.0/24 dev eth0
+
+Catch this and fail the route add:
+ $ ip ro add 172.16.1.0/24 via inet6 2001:db8:1::1 dev eth0
+ Error: IPv4 does not support RTA_VIA attribute.
+
+[js] no extack in 4.12 here yet.
+
+Fixes: 03c0566542f4c ("mpls: Netlink commands to add, remove, and dump routes")
+Signed-off-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv4/fib_frontend.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -671,6 +671,9 @@ static int rtm_to_fib_config(struct net
+ case RTA_GATEWAY:
+ cfg->fc_gw = nla_get_be32(attr);
+ break;
++ case RTA_VIA:
++ err = -EINVAL;
++ goto errout;
+ case RTA_PRIORITY:
+ cfg->fc_priority = nla_get_u32(attr);
+ break;
diff --git a/patches.suse/ipv6-Return-error-for-RTA_VIA-attribute.patch b/patches.suse/ipv6-Return-error-for-RTA_VIA-attribute.patch
new file mode 100644
index 0000000000..8ee4feb585
--- /dev/null
+++ b/patches.suse/ipv6-Return-error-for-RTA_VIA-attribute.patch
@@ -0,0 +1,43 @@
+From: David Ahern <dsahern@gmail.com>
+Date: Tue, 26 Feb 2019 09:00:03 -0800
+Subject: ipv6: Return error for RTA_VIA attribute
+Git-commit: e3818541b49fb88650ba339d33cc53e4095da5b3
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+IPv6 currently does not support nexthops outside of the AF_INET6 family.
+Specifically, it does not handle RTA_VIA attribute. If it is passed
+in a route add request, the actual route added only uses the device
+which is clearly not what the user intended:
+
+ $ ip -6 ro add 2001:db8:2::/64 via inet 172.16.1.1 dev eth0
+ $ ip ro ls
+ ...
+ 2001:db8:2::/64 dev eth0 metric 1024 pref medium
+
+Catch this and fail the route add:
+ $ ip -6 ro add 2001:db8:2::/64 via inet 172.16.1.1 dev eth0
+ Error: IPv6 does not support RTA_VIA attribute.
+
+[js] no extack in 4.12 here yet.
+
+Fixes: 03c0566542f4c ("mpls: Netlink commands to add, remove, and dump routes")
+Signed-off-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv6/route.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2916,6 +2916,9 @@ static int rtm_to_fib6_config(struct sk_
+ cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
+ cfg->fc_flags |= RTF_GATEWAY;
+ }
++ if (tb[RTA_VIA]) {
++ goto errout;
++ }
+
+ if (tb[RTA_DST]) {
+ int plen = (rtm->rtm_dst_len + 7) >> 3;
diff --git a/patches.suse/ipv6-propagate-genlmsg_reply-return-code.patch b/patches.suse/ipv6-propagate-genlmsg_reply-return-code.patch
new file mode 100644
index 0000000000..46ccb82bfe
--- /dev/null
+++ b/patches.suse/ipv6-propagate-genlmsg_reply-return-code.patch
@@ -0,0 +1,35 @@
+From: Li RongQing <lirongqing@baidu.com>
+Date: Mon, 11 Feb 2019 19:32:20 +0800
+Subject: ipv6: propagate genlmsg_reply return code
+Git-commit: d1f20798a119be71746949ba9b2e2ff330fdc038
+Patch-mainline: v5.0-rc7
+References: networking-stable-19_02_24
+
+genlmsg_reply can fail, so propagate its return code
+
+Fixes: 915d7e5e593 ("ipv6: sr: add code base for control plane support of SR-IPv6")
+Signed-off-by: Li RongQing <lirongqing@baidu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv6/seg6.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index 8d0ba757a46c..9b2f272ca164 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
+ rcu_read_unlock();
+
+ genlmsg_end(msg, hdr);
+- genlmsg_reply(msg, info);
+-
+- return 0;
++ return genlmsg_reply(msg, info);
+
+ nla_put_failure:
+ rcu_read_unlock();
+--
+2.21.0
+
diff --git a/patches.suse/ipvlan-disallow-userns-cap_net_admin-to-change-globa.patch b/patches.suse/ipvlan-disallow-userns-cap_net_admin-to-change-globa.patch
new file mode 100644
index 0000000000..69bf43ecfb
--- /dev/null
+++ b/patches.suse/ipvlan-disallow-userns-cap_net_admin-to-change-globa.patch
@@ -0,0 +1,128 @@
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Wed, 20 Feb 2019 00:15:30 +0100
+Subject: ipvlan: disallow userns cap_net_admin to change global mode/flags
+Git-commit: 7cc9f7003a969d359f608ebb701d42cafe75b84a
+Patch-mainline: v5.0-rc8
+References: networking-stable-19_03_15
+
+When running Docker with userns isolation e.g. --userns-remap="default"
+and spawning up some containers with CAP_NET_ADMIN under this realm, I
+noticed that link changes on ipvlan slave device inside that container
+can affect all devices from this ipvlan group which are in other net
+namespaces where the container should have no permission to make changes
+to, such as the init netns, for example.
+
+This effectively allows to undo ipvlan private mode and switch globally to
+bridge mode where slaves can communicate directly without going through
+hostns, or it allows to switch between global operation mode (l2/l3/l3s)
+for everyone bound to the given ipvlan master device. libnetwork plugin
+here is creating an ipvlan master and ipvlan slave in hostns and a slave
+each that is moved into the container's netns upon creation event.
+
+* In hostns:
+
+ # ip -d a
+ [...]
+ 8: cilium_host@bond0: <BROADCAST,MULTICAST,NOARP,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/ether 0c:c4:7a:e1:3d:cc brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 65535
+ ipvlan mode l3 bridge numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
+ inet 10.41.0.1/32 scope link cilium_host
+ valid_lft forever preferred_lft forever
+ [...]
+
+* Spawn container & change ipvlan mode setting inside of it:
+
+ # docker run -dt --cap-add=NET_ADMIN --network cilium-net --name client -l app=test cilium/netperf
+ 9fff485d69dcb5ce37c9e33ca20a11ccafc236d690105aadbfb77e4f4170879c
+
+ # docker exec -ti client ip -d a
+ [...]
+ 10: cilium0@if4: <BROADCAST,MULTICAST,NOARP,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/ether 0c:c4:7a:e1:3d:cc brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 65535
+ ipvlan mode l3 bridge numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
+ inet 10.41.197.43/32 brd 10.41.197.43 scope global cilium0
+ valid_lft forever preferred_lft forever
+
+ # docker exec -ti client ip link change link cilium0 name cilium0 type ipvlan mode l2
+
+ # docker exec -ti client ip -d a
+ [...]
+ 10: cilium0@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/ether 0c:c4:7a:e1:3d:cc brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 65535
+ ipvlan mode l2 bridge numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
+ inet 10.41.197.43/32 brd 10.41.197.43 scope global cilium0
+ valid_lft forever preferred_lft forever
+
+* In hostns (mode switched to l2):
+
+ # ip -d a
+ [...]
+ 8: cilium_host@bond0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/ether 0c:c4:7a:e1:3d:cc brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 65535
+ ipvlan mode l2 bridge numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
+ inet 10.41.0.1/32 scope link cilium_host
+ valid_lft forever preferred_lft forever
+ [...]
+
+Same l3 -> l2 switch would also happen by creating another slave inside
+the container's network namespace when specifying the existing cilium0
+link to derive the actual (bond0) master:
+
+ # docker exec -ti client ip link add link cilium0 name cilium1 type ipvlan mode l2
+
+ # docker exec -ti client ip -d a
+ [...]
+ 2: cilium1@if4: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
+ link/ether 0c:c4:7a:e1:3d:cc brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 65535
+ ipvlan mode l2 bridge numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
+ 10: cilium0@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/ether 0c:c4:7a:e1:3d:cc brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 65535
+ ipvlan mode l2 bridge numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
+ inet 10.41.197.43/32 brd 10.41.197.43 scope global cilium0
+ valid_lft forever preferred_lft forever
+
+* In hostns:
+
+ # ip -d a
+ [...]
+ 8: cilium_host@bond0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/ether 0c:c4:7a:e1:3d:cc brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 65535
+ ipvlan mode l2 bridge numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
+ inet 10.41.0.1/32 scope link cilium_host
+ valid_lft forever preferred_lft forever
+ [...]
+
+One way to mitigate it is to check CAP_NET_ADMIN permissions of
+the ipvlan master device's ns, and only then allow to change
+mode or flags for all devices bound to it. Above two cases are
+then disallowed after the patch.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Mahesh Bandewar <maheshb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/ipvlan/ipvlan_main.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -462,6 +462,9 @@ static int ipvlan_nl_changelink(struct n
+ struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
+ int err = 0;
+
++ if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
++ return -EPERM;
++
+ if (data && data[IFLA_IPVLAN_MODE]) {
+ u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+
+@@ -529,6 +532,8 @@ int ipvlan_link_new(struct net *src_net,
+ struct ipvl_dev *tmp = netdev_priv(phy_dev);
+
+ phy_dev = tmp->phy_dev;
++ if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
++ return -EPERM;
+ } else if (!netif_is_ipvlan_port(phy_dev)) {
+ /* Exit early if the underlying link is invalid or busy */
+ if (phy_dev->type != ARPHRD_ETHER ||
diff --git a/patches.suse/kcm-switch-order-of-device-registration-to-fix-a-cra.patch b/patches.suse/kcm-switch-order-of-device-registration-to-fix-a-cra.patch
new file mode 100644
index 0000000000..ee967461e0
--- /dev/null
+++ b/patches.suse/kcm-switch-order-of-device-registration-to-fix-a-cra.patch
@@ -0,0 +1,88 @@
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Fri, 29 Mar 2019 12:19:46 +0100
+Subject: kcm: switch order of device registration to fix a crash
+Git-commit: 3c446e6f96997f2a95bf0037ef463802162d2323
+Patch-mainline: v5.1-rc4
+References: bnc#1130527
+
+When kcm is loaded while many processes try to create a KCM socket, a
+crash occurs:
+ BUG: unable to handle kernel NULL pointer dereference at 000000000000000e
+ IP: mutex_lock+0x27/0x40 kernel/locking/mutex.c:240
+ PGD 8000000016ef2067 P4D 8000000016ef2067 PUD 3d6e9067 PMD 0
+ Oops: 0002 [#1] SMP KASAN PTI
+ CPU: 0 PID: 7005 Comm: syz-executor.5 Not tainted 4.12.14-396-default #1 SLE15-SP1 (unreleased)
+ RIP: 0010:mutex_lock+0x27/0x40 kernel/locking/mutex.c:240
+ RSP: 0018:ffff88000d487a00 EFLAGS: 00010246
+ RAX: 0000000000000000 RBX: 000000000000000e RCX: 1ffff100082b0719
+ ...
+ CR2: 000000000000000e CR3: 000000004b1bc003 CR4: 0000000000060ef0
+ Call Trace:
+ kcm_create+0x600/0xbf0 [kcm]
+ __sock_create+0x324/0x750 net/socket.c:1272
+ ...
+
+This is due to race between sock_create and unfinished
+register_pernet_device. kcm_create tries to do "net_generic(net,
+kcm_net_id)". but kcm_net_id is not initialized yet.
+
+So switch the order of the two to close the race.
+
+This can be reproduced with mutiple processes doing socket(PF_KCM, ...)
+and one process doing module removal.
+
+Fixes: ab7ac4eb9832 ("kcm: Kernel Connection Multiplexor module")
+Reviewed-by: Michal Kubecek <mkubecek@suse.cz>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/kcm/kcmsock.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -2056,14 +2056,14 @@ static int __init kcm_init(void)
+ if (err)
+ goto fail;
+
+- err = sock_register(&kcm_family_ops);
+- if (err)
+- goto sock_register_fail;
+-
+ err = register_pernet_device(&kcm_net_ops);
+ if (err)
+ goto net_ops_fail;
+
++ err = sock_register(&kcm_family_ops);
++ if (err)
++ goto sock_register_fail;
++
+ err = kcm_proc_init();
+ if (err)
+ goto proc_init_fail;
+@@ -2071,12 +2071,12 @@ static int __init kcm_init(void)
+ return 0;
+
+ proc_init_fail:
+- unregister_pernet_device(&kcm_net_ops);
+-
+-net_ops_fail:
+ sock_unregister(PF_KCM);
+
+ sock_register_fail:
++ unregister_pernet_device(&kcm_net_ops);
++
++net_ops_fail:
+ proto_unregister(&kcm_proto);
+
+ fail:
+@@ -2092,8 +2092,8 @@ fail:
+ static void __exit kcm_exit(void)
+ {
+ kcm_proc_exit();
+- unregister_pernet_device(&kcm_net_ops);
+ sock_unregister(PF_KCM);
++ unregister_pernet_device(&kcm_net_ops);
+ proto_unregister(&kcm_proto);
+ destroy_workqueue(kcm_wq);
+
diff --git a/patches.suse/missing-barriers-in-some-of-unix_sock-addr-and-path-.patch b/patches.suse/missing-barriers-in-some-of-unix_sock-addr-and-path-.patch
new file mode 100644
index 0000000000..c161a28cc8
--- /dev/null
+++ b/patches.suse/missing-barriers-in-some-of-unix_sock-addr-and-path-.patch
@@ -0,0 +1,231 @@
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Fri, 15 Feb 2019 20:09:35 +0000
+Subject: missing barriers in some of unix_sock ->addr and ->path accesses
+Git-commit: ae3b564179bfd06f32d051b9e5d72ce4b2a07c37
+Patch-mainline: v5.0-rc8
+References: networking-stable-19_03_15
+
+Several u->addr and u->path users are not holding any locks in
+common with unix_bind(). unix_state_lock() is useless for those
+purposes.
+
+u->addr is assign-once and *(u->addr) is fully set up by the time
+we set u->addr (all under unix_table_lock). u->path is also
+set in the same critical area, also before setting u->addr, and
+any unix_sock with ->path filled will have non-NULL ->addr.
+
+So setting ->addr with smp_store_release() is all we need for those
+"lockless" users - just have them fetch ->addr with smp_load_acquire()
+and don't even bother looking at ->path if they see NULL ->addr.
+
+Users of ->addr and ->path fall into several classes now:
+ 1) ones that do smp_load_acquire(u->addr) and access *(u->addr)
+and u->path only if smp_load_acquire() has returned non-NULL.
+ 2) places holding unix_table_lock. These are guaranteed that
+*(u->addr) is seen fully initialized. If unix_sock is in one of the
+"bound" chains, so's ->path.
+ 3) unix_sock_destructor() using ->addr is safe. All places
+that set u->addr are guaranteed to have seen all stores *(u->addr)
+while holding a reference to u and unix_sock_destructor() is called
+when (atomic) refcount hits zero.
+ 4) unix_release_sock() using ->path is safe. unix_bind()
+is serialized wrt unix_release() (normally - by struct file
+refcount), and for the instances that had ->path set by unix_bind()
+unix_release_sock() comes from unix_release(), so they are fine.
+Instances that had it set in unix_stream_connect() either end up
+attached to a socket (in unix_accept()), in which case the call
+chain to unix_release_sock() and serialization are the same as in
+the previous case, or they never get accept'ed and unix_release_sock()
+is called when the listener is shut down and its queue gets purged.
+In that case the listener's queue lock provides the barriers needed -
+unix_stream_connect() shoves our unix_sock into listener's queue
+under that lock right after having set ->path and eventual
+unix_release_sock() caller picks them from that queue under the
+same lock right before calling unix_release_sock().
+ 5) unix_find_other() use of ->path is pointless, but safe -
+it happens with successful lookup by (abstract) name, so ->path.dentry
+is guaranteed to be NULL there.
+
+earlier-variant-reviewed-by: "Paul E. McKenney" <paulmck@linux.ibm.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/unix/af_unix.c | 57 +++++++++++++++++++++++++++++----------------------
+ net/unix/diag.c | 3 +-
+ security/lsm_audit.c | 10 +++++---
+ 3 files changed, 41 insertions(+), 29 deletions(-)
+
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -892,7 +892,7 @@ retry:
+ addr->hash ^= sk->sk_type;
+
+ __unix_remove_socket(sk);
+- u->addr = addr;
++ smp_store_release(&u->addr, addr);
+ __unix_insert_socket(&unix_socket_table[addr->hash], sk);
+ spin_unlock(&unix_table_lock);
+ err = 0;
+@@ -1062,7 +1062,7 @@ static int unix_bind(struct socket *sock
+
+ err = 0;
+ __unix_remove_socket(sk);
+- u->addr = addr;
++ smp_store_release(&u->addr, addr);
+ __unix_insert_socket(list, sk);
+
+ out_unlock:
+@@ -1333,15 +1333,29 @@ restart:
+ RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
+ otheru = unix_sk(other);
+
+- /* copy address information from listening to new sock*/
+- if (otheru->addr) {
+- atomic_inc(&otheru->addr->refcnt);
+- newu->addr = otheru->addr;
+- }
++ /* copy address information from listening to new sock
++ *
++ * The contents of *(otheru->addr) and otheru->path
++ * are seen fully set up here, since we have found
++ * otheru in hash under unix_table_lock. Insertion
++ * into the hash chain we'd found it in had been done
++ * in an earlier critical area protected by unix_table_lock,
++ * the same one where we'd set *(otheru->addr) contents,
++ * as well as otheru->path and otheru->addr itself.
++ *
++ * Using smp_store_release() here to set newu->addr
++ * is enough to make those stores, as well as stores
++ * to newu->path visible to anyone who gets newu->addr
++ * by smp_load_acquire(). IOW, the same warranties
++ * as for unix_sock instances bound in unix_bind() or
++ * in unix_autobind().
++ */
+ if (otheru->path.dentry) {
+ path_get(&otheru->path);
+ newu->path = otheru->path;
+ }
++ atomic_inc(&otheru->addr->refcnt);
++ smp_store_release(&newu->addr, otheru->addr);
+
+ /* Set credentials */
+ copy_peercred(sk, other);
+@@ -1455,7 +1469,7 @@ out:
+ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
+ {
+ struct sock *sk = sock->sk;
+- struct unix_sock *u;
++ struct unix_address *addr;
+ DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
+ int err = 0;
+
+@@ -1470,19 +1484,15 @@ static int unix_getname(struct socket *s
+ sock_hold(sk);
+ }
+
+- u = unix_sk(sk);
+- unix_state_lock(sk);
+- if (!u->addr) {
++ addr = smp_load_acquire(&unix_sk(sk)->addr);
++ if (!addr) {
+ sunaddr->sun_family = AF_UNIX;
+ sunaddr->sun_path[0] = 0;
+ *uaddr_len = sizeof(short);
+ } else {
+- struct unix_address *addr = u->addr;
+-
+ *uaddr_len = addr->len;
+ memcpy(sunaddr, addr->name, *uaddr_len);
+ }
+- unix_state_unlock(sk);
+ sock_put(sk);
+ out:
+ return err;
+@@ -2096,11 +2106,11 @@ static int unix_seqpacket_recvmsg(struct
+
+ static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
+ {
+- struct unix_sock *u = unix_sk(sk);
++ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
+
+- if (u->addr) {
+- msg->msg_namelen = u->addr->len;
+- memcpy(msg->msg_name, u->addr->name, u->addr->len);
++ if (addr) {
++ msg->msg_namelen = addr->len;
++ memcpy(msg->msg_name, addr->name, addr->len);
+ }
+ }
+
+@@ -2608,15 +2618,14 @@ static int unix_open_file(struct sock *s
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+
+- unix_state_lock(sk);
++ if (!smp_load_acquire(&unix_sk(sk)->addr))
++ return -ENOENT;
++
+ path = unix_sk(sk)->path;
+- if (!path.dentry) {
+- unix_state_unlock(sk);
++ if (!path.dentry)
+ return -ENOENT;
+- }
+
+ path_get(&path);
+- unix_state_unlock(sk);
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+@@ -2856,7 +2865,7 @@ static int unix_seq_show(struct seq_file
+ (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
+ sock_i_ino(s));
+
+- if (u->addr) {
++ if (u->addr) { // under unix_table_lock here
+ int i, len;
+ seq_putc(seq, ' ');
+
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -10,7 +10,8 @@
+
+ static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
+ {
+- struct unix_address *addr = unix_sk(sk)->addr;
++ /* might or might not have unix_table_lock */
++ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
+
+ if (!addr)
+ return 0;
+--- a/security/lsm_audit.c
++++ b/security/lsm_audit.c
+@@ -321,6 +321,7 @@ static void dump_common_audit_data(struc
+ if (a->u.net->sk) {
+ struct sock *sk = a->u.net->sk;
+ struct unix_sock *u;
++ struct unix_address *addr;
+ int len = 0;
+ char *p = NULL;
+
+@@ -351,14 +352,15 @@ static void dump_common_audit_data(struc
+ #endif
+ case AF_UNIX:
+ u = unix_sk(sk);
++ addr = smp_load_acquire(&u->addr);
++ if (!addr)
++ break;
+ if (u->path.dentry) {
+ audit_log_d_path(ab, " path=", &u->path);
+ break;
+ }
+- if (!u->addr)
+- break;
+- len = u->addr->len-sizeof(short);
+- p = &u->addr->name->sun_path[0];
++ len = addr->len-sizeof(short);
++ p = &addr->name->sun_path[0];
+ audit_log_format(ab, " path=");
+ if (*p)
+ audit_log_untrustedstring(ab, p);
diff --git a/patches.suse/mpls-Return-error-for-RTA_GATEWAY-attribute.patch b/patches.suse/mpls-Return-error-for-RTA_GATEWAY-attribute.patch
new file mode 100644
index 0000000000..bc77bbe849
--- /dev/null
+++ b/patches.suse/mpls-Return-error-for-RTA_GATEWAY-attribute.patch
@@ -0,0 +1,32 @@
+From: David Ahern <dsahern@gmail.com>
+Date: Tue, 26 Feb 2019 09:00:04 -0800
+Subject: mpls: Return error for RTA_GATEWAY attribute
+Git-commit: be48220edd48ca0d569782992840488a52373a24
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+MPLS does not support nexthops with an MPLS address family.
+Specifically, it does not handle RTA_GATEWAY attribute. Make it
+clear by returning an error.
+
+[js] no extack in 4.12 here yet.
+
+Fixes: 03c0566542f4c ("mpls: Netlink commands to add, remove, and dump routes")
+Signed-off-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/mpls/af_mpls.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -1712,6 +1712,8 @@ static int rtm_to_route_config(struct sk
+
+ break;
+ }
++ case RTA_GATEWAY:
++ goto errout;
+ case RTA_VIA:
+ {
+ if (nla_get_via(nla, &cfg->rc_via_alen,
diff --git a/patches.suse/net-Add-__icmp_send-helper.patch b/patches.suse/net-Add-__icmp_send-helper.patch
new file mode 100644
index 0000000000..a77f3fb4a7
--- /dev/null
+++ b/patches.suse/net-Add-__icmp_send-helper.patch
@@ -0,0 +1,73 @@
+From: Nazarov Sergey <s-nazarov@yandex.ru>
+Date: Mon, 25 Feb 2019 19:24:15 +0300
+Subject: net: Add __icmp_send helper.
+Git-commit: 9ef6b42ad6fd7929dd1b6092cb02014e382c6a91
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+Add __icmp_send function having ip_options struct parameter
+
+Signed-off-by: Sergey Nazarov <s-nazarov@yandex.ru>
+Reviewed-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/net/icmp.h | 9 ++++++++-
+ net/ipv4/icmp.c | 7 ++++---
+ 2 files changed, 12 insertions(+), 4 deletions(-)
+
+--- a/include/net/icmp.h
++++ b/include/net/icmp.h
+@@ -22,6 +22,7 @@
+
+ #include <net/inet_sock.h>
+ #include <net/snmp.h>
++#include <net/ip.h>
+
+ struct icmp_err {
+ int errno;
+@@ -39,7 +40,13 @@ struct net_proto_family;
+ struct sk_buff;
+ struct net;
+
+-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
++void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
++ const struct ip_options *opt);
++static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++{
++ __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
++}
++
+ int icmp_rcv(struct sk_buff *skb);
+ void icmp_err(struct sk_buff *skb, u32 info);
+ int icmp_init(void);
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -573,7 +573,8 @@ relookup_failed:
+ * MUST reply to only the first fragment.
+ */
+
+-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
++ const struct ip_options *opt)
+ {
+ struct iphdr *iph;
+ int room;
+@@ -694,7 +695,7 @@ void icmp_send(struct sk_buff *skb_in, i
+ iph->tos;
+ mark = IP4_REPLY_MARK(net, skb_in->mark);
+
+- if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
++ if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
+ goto out_unlock;
+
+
+@@ -747,7 +748,7 @@ out_bh_enable:
+ local_bh_enable();
+ out:;
+ }
+-EXPORT_SYMBOL(icmp_send);
++EXPORT_SYMBOL(__icmp_send);
+
+
+ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
diff --git a/patches.suse/net-Add-header-for-usage-of-fls64.patch b/patches.suse/net-Add-header-for-usage-of-fls64.patch
new file mode 100644
index 0000000000..8a0e0fbb6f
--- /dev/null
+++ b/patches.suse/net-Add-header-for-usage-of-fls64.patch
@@ -0,0 +1,25 @@
+From: "David S. Miller" <davem@davemloft.net>
+Date: Sat, 16 Feb 2019 13:44:39 -0800
+Subject: net: Add header for usage of fls64()
+Git-commit: 8681ef1f3d295bd3600315325f3b3396d76d02f6
+Patch-mainline: v5.0-rc8
+References: networking-stable-19_02_20
+
+Fixes: 3b89ea9c5902 ("net: Fix for_each_netdev_feature on Big endian")
+Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/linux/netdev_features.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -11,6 +11,7 @@
+ #define _LINUX_NETDEV_FEATURES_H
+
+ #include <linux/types.h>
++#include <linux/bitops.h>
+ #include <asm/byteorder.h>
+
+ typedef u64 netdev_features_t;
diff --git a/patches.suse/net-Do-not-allocate-page-fragments-that-are-not-skb-.patch b/patches.suse/net-Do-not-allocate-page-fragments-that-are-not-skb-.patch
new file mode 100644
index 0000000000..b743af5a39
--- /dev/null
+++ b/patches.suse/net-Do-not-allocate-page-fragments-that-are-not-skb-.patch
@@ -0,0 +1,43 @@
+From: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+Date: Fri, 15 Feb 2019 14:44:18 -0800
+Subject: net: Do not allocate page fragments that are not skb aligned
+Git-commit: 3bed3cc4156eedf652b4df72bdb35d4f1a2a739d
+Patch-mainline: v5.0-rc8
+References: networking-stable-19_02_20
+
+This patch addresses the fact that there are drivers, specifically tun,
+that will call into the network page fragment allocators with buffer sizes
+that are not cache aligned. Doing this could result in data alignment
+and DMA performance issues as these fragment pools are also shared with the
+skb allocator and any other devices that will use napi_alloc_frags or
+netdev_alloc_frags.
+
+Fixes: ffde7328a36d ("net: Split netdev_alloc_frag into __alloc_page_frag and add __napi_alloc_frag")
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/core/skbuff.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -382,6 +382,8 @@ static void *__netdev_alloc_frag(unsigne
+ */
+ void *netdev_alloc_frag(unsigned int fragsz)
+ {
++ fragsz = SKB_DATA_ALIGN(fragsz);
++
+ return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
+ }
+ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -395,6 +397,8 @@ static void *__napi_alloc_frag(unsigned
+
+ void *napi_alloc_frag(unsigned int fragsz)
+ {
++ fragsz = SKB_DATA_ALIGN(fragsz);
++
+ return __napi_alloc_frag(fragsz, GFP_ATOMIC);
+ }
+ EXPORT_SYMBOL(napi_alloc_frag);
diff --git a/patches.suse/net-Fix-for_each_netdev_feature-on-Big-endian.patch b/patches.suse/net-Fix-for_each_netdev_feature-on-Big-endian.patch
new file mode 100644
index 0000000000..a4d5ef7ef5
--- /dev/null
+++ b/patches.suse/net-Fix-for_each_netdev_feature-on-Big-endian.patch
@@ -0,0 +1,86 @@
+From: Hauke Mehrtens <hauke.mehrtens@intel.com>
+Date: Fri, 15 Feb 2019 17:58:54 +0100
+Subject: net: Fix for_each_netdev_feature on Big endian
+Git-commit: 3b89ea9c5902acccdbbdec307c85edd1bf52515e
+Patch-mainline: v5.0-rc8
+References: networking-stable-19_02_20
+
+The features attribute is of type u64 and stored in the native endianes on
+the system. The for_each_set_bit() macro takes a pointer to a 32 bit array
+and goes over the bits in this area. On little Endian systems this also
+works with an u64 as the most significant bit is on the highest address,
+but on big endian the words are swapped. When we expect bit 15 here we get
+bit 47 (15 + 32).
+
+This patch converts it more or less to its own for_each_set_bit()
+implementation which works on 64 bit integers directly. This is then
+completely in host endianness and should work like expected.
+
+Fixes: fd867d51f ("net/core: generic support for disabling netdev features down stack")
+Signed-off-by: Hauke Mehrtens <hauke.mehrtens@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/linux/netdev_features.h | 23 +++++++++++++++++++++--
+ net/core/dev.c | 4 ++--
+ 2 files changed, 23 insertions(+), 4 deletions(-)
+
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -11,6 +11,7 @@
+ #define _LINUX_NETDEV_FEATURES_H
+
+ #include <linux/types.h>
++#include <asm/byteorder.h>
+
+ typedef u64 netdev_features_t;
+
+@@ -146,8 +147,26 @@ enum {
+ #define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM)
+ #define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT)
+
+-#define for_each_netdev_feature(mask_addr, bit) \
+- for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
++/* Finds the next feature with the highest number of the range of start till 0.
++ */
++static inline int find_next_netdev_feature(u64 feature, unsigned long start)
++{
++ /* like BITMAP_LAST_WORD_MASK() for u64
++ * this sets the most significant 64 - start to 0.
++ */
++ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
++
++ return fls64(feature) - 1;
++}
++
++/* This goes for the MSB to the LSB through the set feature bits,
++ * mask_addr should be a u64 and bit an int
++ */
++#define for_each_netdev_feature(mask_addr, bit) \
++ for ((bit) = find_next_netdev_feature((mask_addr), \
++ NETDEV_FEATURE_COUNT); \
++ (bit) >= 0; \
++ (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
+
+ /* Features valid for ethtool to change */
+ /* = all defined minus driver/device-class-related */
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -7206,7 +7206,7 @@ static netdev_features_t netdev_sync_upp
+ netdev_features_t feature;
+ int feature_bit;
+
+- for_each_netdev_feature(&upper_disables, feature_bit) {
++ for_each_netdev_feature(upper_disables, feature_bit) {
+ feature = __NETIF_F_BIT(feature_bit);
+ if (!(upper->wanted_features & feature)
+ && (features & feature)) {
+@@ -7226,7 +7226,7 @@ static void netdev_sync_lower_features(s
+ netdev_features_t feature;
+ int feature_bit;
+
+- for_each_netdev_feature(&upper_disables, feature_bit) {
++ for_each_netdev_feature(upper_disables, feature_bit) {
+ feature = __NETIF_F_BIT(feature_bit);
+ if (!(features & feature) && (lower->features & feature)) {
+ netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
diff --git a/patches.suse/net-Set-rtm_table-to-RT_TABLE_COMPAT-for-ipv6-for-ta.patch b/patches.suse/net-Set-rtm_table-to-RT_TABLE_COMPAT-for-ipv6-for-ta.patch
new file mode 100644
index 0000000000..7168eecbed
--- /dev/null
+++ b/patches.suse/net-Set-rtm_table-to-RT_TABLE_COMPAT-for-ipv6-for-ta.patch
@@ -0,0 +1,30 @@
+From: Kalash Nainwal <kalash@arista.com>
+Date: Wed, 20 Feb 2019 16:23:04 -0800
+Subject: net: Set rtm_table to RT_TABLE_COMPAT for ipv6 for tables > 255
+Git-commit: 97f0082a0592212fc15d4680f5a4d80f79a1687c
+Patch-mainline: v5.0-rc8
+References: networking-stable-19_03_15
+
+Set rtm_table to RT_TABLE_COMPAT for ipv6 for tables > 255 to
+keep legacy software happy. This is similar to what was done for
+ipv4 in commit 709772e6e065 ("net: Fix routing tables with
+id > 255 for legacy software").
+
+Signed-off-by: Kalash Nainwal <kalash@arista.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv6/route.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3373,7 +3373,7 @@ static int rt6_fill_node(struct net *net
+ table = rt->rt6i_table->tb6_id;
+ else
+ table = RT6_TABLE_UNSPEC;
+- rtm->rtm_table = table;
++ rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
+ if (nla_put_u32(skb, RTA_TABLE, table))
+ goto nla_put_failure;
+ if (rt->rt6i_flags & RTF_REJECT) {
diff --git a/patches.suse/net-avoid-false-positives-in-untrusted-gso-validatio.patch b/patches.suse/net-avoid-false-positives-in-untrusted-gso-validatio.patch
new file mode 100644
index 0000000000..faa731514c
--- /dev/null
+++ b/patches.suse/net-avoid-false-positives-in-untrusted-gso-validatio.patch
@@ -0,0 +1,51 @@
+From: Willem de Bruijn <willemb@google.com>
+Date: Mon, 18 Feb 2019 23:37:12 -0500
+Subject: net: avoid false positives in untrusted gso validation
+Git-commit: 9e8db5913264d3967b93c765a6a9e464d9c473db
+Patch-mainline: v5.0-rc8
+References: git-fixes
+
+GSO packets with vnet_hdr must conform to a small set of gso_types.
+The below commit uses flow dissection to drop packets that do not.
+
+But it has false positives when the skb is not fully initialized.
+Dissection needs skb->protocol and skb->network_header.
+
+Infer skb->protocol from gso_type as the two must agree.
+SKB_GSO_UDP can use both ipv4 and ipv6, so try both.
+
+Exclude callers for which network header offset is not known.
+
+Fixes: d5be7f632bad ("net: validate untrusted gso packets without csum offload")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/linux/virtio_net.h | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -60,10 +60,20 @@ static inline int virtio_net_hdr_to_skb(
+ /* gso packets without NEEDS_CSUM do not set transport_offset.
+ * probe and drop if does not match one of the above types.
+ */
+- if (gso_type) {
++ if (gso_type && skb->network_header) {
++ if (!skb->protocol)
++ virtio_net_hdr_set_proto(skb, hdr);
++retry:
+ skb_probe_transport_header(skb, -1);
+- if (!skb_transport_header_was_set(skb))
++ if (!skb_transport_header_was_set(skb)) {
++ /* UFO does not specify ipv4 or 6: try both */
++ if (gso_type & SKB_GSO_UDP &&
++ skb->protocol == htons(ETH_P_IP)) {
++ skb->protocol = htons(ETH_P_IPV6);
++ goto retry;
++ }
+ return -EINVAL;
++ }
+ }
+ }
+
diff --git a/patches.suse/net-avoid-use-IPCB-in-cipso_v4_error.patch b/patches.suse/net-avoid-use-IPCB-in-cipso_v4_error.patch
new file mode 100644
index 0000000000..ad7b8d18aa
--- /dev/null
+++ b/patches.suse/net-avoid-use-IPCB-in-cipso_v4_error.patch
@@ -0,0 +1,101 @@
+From: Nazarov Sergey <s-nazarov@yandex.ru>
+Date: Mon, 25 Feb 2019 19:27:15 +0300
+Subject: net: avoid use IPCB in cipso_v4_error
+Git-commit: 3da1ed7ac398f34fff1694017a07054d69c5f5c5
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+Extract IP options in cipso_v4_error and use __icmp_send.
+
+Signed-off-by: Sergey Nazarov <s-nazarov@yandex.ru>
+Acked-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/net/ip.h | 2 ++
+ net/ipv4/cipso_ipv4.c | 17 +++++++++++++++--
+ net/ipv4/ip_options.c | 22 +++++++++++++++++-----
+ 3 files changed, 34 insertions(+), 7 deletions(-)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -584,6 +584,8 @@ static inline int ip_options_echo(struct
+ }
+
+ void ip_options_fragment(struct sk_buff *skb);
++int __ip_options_compile(struct net *net, struct ip_options *opt,
++ struct sk_buff *skb, __be32 *info);
+ int ip_options_compile(struct net *net, struct ip_options *opt,
+ struct sk_buff *skb);
+ int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1732,13 +1732,26 @@ validate_return:
+ */
+ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
+ {
++ unsigned char optbuf[sizeof(struct ip_options) + 40];
++ struct ip_options *opt = (struct ip_options *)optbuf;
++
+ if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
+ return;
+
++ /*
++ * We might be called above the IP layer,
++ * so we can not use icmp_send and IPCB here.
++ */
++
++ memset(opt, 0, sizeof(struct ip_options));
++ opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
++ if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
++ return;
++
+ if (gateway)
+- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
++ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
+ else
+- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
++ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
+ }
+
+ /**
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -253,8 +253,9 @@ static void spec_dst_fill(__be32 *spec_d
+ * If opt == NULL, then skb->data should point to IP header.
+ */
+
+-int ip_options_compile(struct net *net,
+- struct ip_options *opt, struct sk_buff *skb)
++int __ip_options_compile(struct net *net,
++ struct ip_options *opt, struct sk_buff *skb,
++ __be32 *info)
+ {
+ __be32 spec_dst = htonl(INADDR_ANY);
+ unsigned char *pp_ptr = NULL;
+@@ -470,11 +471,22 @@ eol:
+ return 0;
+
+ error:
+- if (skb) {
+- icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
+- }
++ if (info)
++ *info = htonl((pp_ptr-iph)<<24);
+ return -EINVAL;
+ }
++
++int ip_options_compile(struct net *net,
++ struct ip_options *opt, struct sk_buff *skb)
++{
++ int ret;
++ __be32 info;
++
++ ret = __ip_options_compile(net, opt, skb, &info);
++ if (ret != 0 && skb)
++ icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
++ return ret;
++}
+ EXPORT_SYMBOL(ip_options_compile);
+
+ /*
diff --git a/patches.suse/net-dsa-mv88e6xxx-Fix-u64-statistics.patch b/patches.suse/net-dsa-mv88e6xxx-Fix-u64-statistics.patch
new file mode 100644
index 0000000000..66ef188142
--- /dev/null
+++ b/patches.suse/net-dsa-mv88e6xxx-Fix-u64-statistics.patch
@@ -0,0 +1,31 @@
+From: Andrew Lunn <andrew@lunn.ch>
+Date: Thu, 28 Feb 2019 18:14:03 +0100
+Subject: net: dsa: mv88e6xxx: Fix u64 statistics
+Git-commit: 6e46e2d821bb22b285ae8187959096b65d063b0d
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+The switch maintains u64 counters for the number of octets sent and
+received. These are kept as two u32's which need to be combined. Fix
+the combining, which wrongly worked on u16's.
+
+Fixes: 80c4627b2719 ("dsa: mv88x6xxx: Refactor getting a single statistic")
+Reported-by: Chris Healy <Chris.Healy@zii.aero>
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/dsa/mv88e6xxx/chip.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -869,7 +869,7 @@ static uint64_t _mv88e6xxx_get_ethtool_s
+ default:
+ return UINT64_MAX;
+ }
+- value = (((u64)high) << 16) | low;
++ value = (((u64)high) << 32) | low;
+ return value;
+ }
+
diff --git a/patches.suse/net-fix-IPv6-prefix-route-residue.patch b/patches.suse/net-fix-IPv6-prefix-route-residue.patch
new file mode 100644
index 0000000000..fc6a2a7561
--- /dev/null
+++ b/patches.suse/net-fix-IPv6-prefix-route-residue.patch
@@ -0,0 +1,44 @@
+From: Zhiqiang Liu <liuzhiqiang26@huawei.com>
+Date: Mon, 11 Feb 2019 10:57:46 +0800
+Subject: net: fix IPv6 prefix route residue
+Git-commit: e75913c93f7cd5f338ab373c34c93a655bd309cb
+Patch-mainline: v5.0-rc7
+References: networking-stable-19_02_20
+
+Follow those steps:
+ # ip addr add 2001:123::1/32 dev eth0
+ # ip addr add 2001:123:456::2/64 dev eth0
+ # ip addr del 2001:123::1/32 dev eth0
+ # ip addr del 2001:123:456::2/64 dev eth0
+and then prefix route of 2001:123::1/32 will still exist.
+
+This is because ipv6_prefix_equal in check_cleanup_prefix_route
+func does not check whether two IPv6 addresses have the same
+prefix length. If the prefix of one address starts with another
+shorter address prefix, even though their prefix lengths are
+different, the return value of ipv6_prefix_equal is true.
+
+Here I add a check of whether two addresses have the same prefix
+to decide whether their prefixes are equal.
+
+Fixes: 5b84efecb7d9 ("ipv6 addrconf: don't cleanup prefix route for IFA_F_NOPREFIXROUTE")
+Signed-off-by: Zhiqiang Liu <liuzhiqiang26@huawei.com>
+Reported-by: Wenhao Zhang <zhangwenhao8@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv6/addrconf.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1109,7 +1109,8 @@ check_cleanup_prefix_route(struct inet6_
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ if (ifa == ifp)
+ continue;
+- if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
++ if (ifa->prefix_len != ifp->prefix_len ||
++ !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
+ ifp->prefix_len))
+ continue;
+ if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
diff --git a/patches.suse/net-hsr-fix-memory-leak-in-hsr_dev_finalize.patch b/patches.suse/net-hsr-fix-memory-leak-in-hsr_dev_finalize.patch
new file mode 100644
index 0000000000..62ed0e51b9
--- /dev/null
+++ b/patches.suse/net-hsr-fix-memory-leak-in-hsr_dev_finalize.patch
@@ -0,0 +1,109 @@
+From: Mao Wenan <maowenan@huawei.com>
+Date: Wed, 6 Mar 2019 22:45:01 +0800
+Subject: net: hsr: fix memory leak in hsr_dev_finalize()
+Git-commit: 6caabe7f197d3466d238f70915d65301f1716626
+Patch-mainline: v5.1-rc1
+References: networking-stable-19_03_15
+
+If hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER) failed to
+add port, it directly returns res and forgets to free the node
+that allocated in hsr_create_self_node(), and forgets to delete
+the node->mac_list linked in hsr->self_node_db.
+
+BUG: memory leak
+unreferenced object 0xffff8881cfa0c780 (size 64):
+ comm "syz-executor.0", pid 2077, jiffies 4294717969 (age 2415.377s)
+ hex dump (first 32 bytes):
+ e0 c7 a0 cf 81 88 ff ff 00 02 00 00 00 00 ad de ................
+ 00 e6 49 cd 81 88 ff ff c0 9b 87 d0 81 88 ff ff ..I.............
+ backtrace:
+ [<00000000e2ff5070>] hsr_dev_finalize+0x736/0x960 [hsr]
+ [<000000003ed2e597>] hsr_newlink+0x2b2/0x3e0 [hsr]
+ [<000000003fa8c6b6>] __rtnl_newlink+0xf1f/0x1600 net/core/rtnetlink.c:3182
+ [<000000001247a7ad>] rtnl_newlink+0x66/0x90 net/core/rtnetlink.c:3240
+ [<00000000e7d1b61d>] rtnetlink_rcv_msg+0x54e/0xb90 net/core/rtnetlink.c:5130
+ [<000000005556bd3a>] netlink_rcv_skb+0x129/0x340 net/netlink/af_netlink.c:2477
+ [<00000000741d5ee6>] netlink_unicast_kernel net/netlink/af_netlink.c:1310 [inline]
+ [<00000000741d5ee6>] netlink_unicast+0x49a/0x650 net/netlink/af_netlink.c:1336
+ [<000000009d56f9b7>] netlink_sendmsg+0x88b/0xdf0 net/netlink/af_netlink.c:1917
+ [<0000000046b35c59>] sock_sendmsg_nosec net/socket.c:621 [inline]
+ [<0000000046b35c59>] sock_sendmsg+0xc3/0x100 net/socket.c:631
+ [<00000000d208adc9>] __sys_sendto+0x33e/0x560 net/socket.c:1786
+ [<00000000b582837a>] __do_sys_sendto net/socket.c:1798 [inline]
+ [<00000000b582837a>] __se_sys_sendto net/socket.c:1794 [inline]
+ [<00000000b582837a>] __x64_sys_sendto+0xdd/0x1b0 net/socket.c:1794
+ [<00000000c866801d>] do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
+ [<00000000fea382d9>] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+ [<00000000e01dacb3>] 0xffffffffffffffff
+
+Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Mao Wenan <maowenan@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/hsr/hsr_device.c | 4 +++-
+ net/hsr/hsr_framereg.c | 12 ++++++++++++
+ net/hsr/hsr_framereg.h | 1 +
+ 3 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index b8cd43c9ed5b..c4676bacb8db 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -486,7 +486,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+
+ res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
+ if (res)
+- return res;
++ goto err_add_port;
+
+ res = register_netdevice(hsr_dev);
+ if (res)
+@@ -506,6 +506,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+ fail:
+ hsr_for_each_port(hsr, port)
+ hsr_del_port(port);
++err_add_port:
++ hsr_del_node(&hsr->self_node_db);
+
+ return res;
+ }
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 286ceb41ac0c..9af16cb68f76 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -124,6 +124,18 @@ int hsr_create_self_node(struct list_head *self_node_db,
+ return 0;
+ }
+
++void hsr_del_node(struct list_head *self_node_db)
++{
++ struct hsr_node *node;
++
++ rcu_read_lock();
++ node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
++ rcu_read_unlock();
++ if (node) {
++ list_del_rcu(&node->mac_list);
++ kfree(node);
++ }
++}
+
+ /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
+ * seq_out is used to initialize filtering of outgoing duplicate frames
+diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
+index 370b45998121..531fd3dfcac1 100644
+--- a/net/hsr/hsr_framereg.h
++++ b/net/hsr/hsr_framereg.h
+@@ -16,6 +16,7 @@
+
+ struct hsr_node;
+
++void hsr_del_node(struct list_head *self_node_db);
+ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
+ u16 seq_out);
+ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
+--
+2.21.0
+
diff --git a/patches.suse/net-hsr-fix-possible-crash-in-add_timer.patch b/patches.suse/net-hsr-fix-possible-crash-in-add_timer.patch
new file mode 100644
index 0000000000..5cabd7f457
--- /dev/null
+++ b/patches.suse/net-hsr-fix-possible-crash-in-add_timer.patch
@@ -0,0 +1,133 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 7 Mar 2019 09:36:33 -0800
+Subject: net/hsr: fix possible crash in add_timer()
+Git-commit: 1e027960edfaa6a43f9ca31081729b716598112b
+Patch-mainline: v5.1-rc1
+References: networking-stable-19_03_15
+
+syzbot found another add_timer() issue, this time in net/hsr [1]
+
+Let's use mod_timer() which is safe.
+
+[1]
+kernel BUG at kernel/time/timer.c:1136!
+invalid opcode: 0000 [#1] PREEMPT SMP KASAN
+CPU: 0 PID: 15909 Comm: syz-executor.3 Not tainted 5.0.0+ #97
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+kobject: 'loop2' (00000000f5629718): kobject_uevent_env
+RIP: 0010:add_timer kernel/time/timer.c:1136 [inline]
+RIP: 0010:add_timer+0x654/0xbe0 kernel/time/timer.c:1134
+Code: 0f 94 c5 31 ff 44 89 ee e8 09 61 0f 00 45 84 ed 0f 84 77 fd ff ff e8 bb 5f 0f 00 e8 07 10 a0 ff e9 68 fd ff ff e8 ac 5f 0f 00 <0f> 0b e8 a5 5f 0f 00 0f 0b e8 9e 5f 0f 00 4c 89 b5 58 ff ff ff e9
+RSP: 0018:ffff8880656eeca0 EFLAGS: 00010246
+kobject: 'loop2' (00000000f5629718): fill_kobj_path: path = '/devices/virtual/block/loop2'
+RAX: 0000000000040000 RBX: 1ffff1100caddd9a RCX: ffffc9000c436000
+RDX: 0000000000040000 RSI: ffffffff816056c4 RDI: ffff88806a2f6cc8
+RBP: ffff8880656eed58 R08: ffff888067f4a300 R09: ffff888067f4abc8
+R10: 0000000000000000 R11: 0000000000000000 R12: ffff88806a2f6cc0
+R13: dffffc0000000000 R14: 0000000000000001 R15: ffff8880656eed30
+FS: 00007fc2019bf700(0000) GS:ffff8880ae800000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000738000 CR3: 0000000067e8e000 CR4: 00000000001406f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ hsr_check_announce net/hsr/hsr_device.c:99 [inline]
+ hsr_check_carrier_and_operstate+0x567/0x6f0 net/hsr/hsr_device.c:120
+ hsr_netdev_notify+0x297/0xa00 net/hsr/hsr_main.c:51
+ notifier_call_chain+0xc7/0x240 kernel/notifier.c:93
+ __raw_notifier_call_chain kernel/notifier.c:394 [inline]
+ raw_notifier_call_chain+0x2e/0x40 kernel/notifier.c:401
+ call_netdevice_notifiers_info+0x3f/0x90 net/core/dev.c:1739
+ call_netdevice_notifiers_extack net/core/dev.c:1751 [inline]
+ call_netdevice_notifiers net/core/dev.c:1765 [inline]
+ dev_open net/core/dev.c:1436 [inline]
+ dev_open+0x143/0x160 net/core/dev.c:1424
+ team_port_add drivers/net/team/team.c:1203 [inline]
+ team_add_slave+0xa07/0x15d0 drivers/net/team/team.c:1933
+ do_set_master net/core/rtnetlink.c:2358 [inline]
+ do_set_master+0x1d4/0x230 net/core/rtnetlink.c:2332
+ do_setlink+0x966/0x3510 net/core/rtnetlink.c:2493
+ rtnl_setlink+0x271/0x3b0 net/core/rtnetlink.c:2747
+ rtnetlink_rcv_msg+0x465/0xb00 net/core/rtnetlink.c:5192
+ netlink_rcv_skb+0x17a/0x460 net/netlink/af_netlink.c:2485
+ rtnetlink_rcv+0x1d/0x30 net/core/rtnetlink.c:5210
+ netlink_unicast_kernel net/netlink/af_netlink.c:1310 [inline]
+ netlink_unicast+0x536/0x720 net/netlink/af_netlink.c:1336
+ netlink_sendmsg+0x8ae/0xd70 net/netlink/af_netlink.c:1925
+ sock_sendmsg_nosec net/socket.c:622 [inline]
+ sock_sendmsg+0xdd/0x130 net/socket.c:632
+ sock_write_iter+0x27c/0x3e0 net/socket.c:923
+ call_write_iter include/linux/fs.h:1869 [inline]
+ do_iter_readv_writev+0x5e0/0x8e0 fs/read_write.c:680
+ do_iter_write fs/read_write.c:956 [inline]
+ do_iter_write+0x184/0x610 fs/read_write.c:937
+ vfs_writev+0x1b3/0x2f0 fs/read_write.c:1001
+ do_writev+0xf6/0x290 fs/read_write.c:1036
+ __do_sys_writev fs/read_write.c:1109 [inline]
+ __se_sys_writev fs/read_write.c:1106 [inline]
+ __x64_sys_writev+0x75/0xb0 fs/read_write.c:1106
+ do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x457f29
+Code: ad b8 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 7b b8 fb ff c3 66 2e 0f 1f 84 00 00 00 00
+RSP: 002b:00007fc2019bec78 EFLAGS: 00000246 ORIG_RAX: 0000000000000014
+RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000000457f29
+RDX: 0000000000000001 RSI: 00000000200000c0 RDI: 0000000000000003
+RBP: 000000000073bf00 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007fc2019bf6d4
+R13: 00000000004c4a60 R14: 00000000004dd218 R15: 00000000ffffffff
+
+Fixes: f421436a591d ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Cc: Arvid Brodin <arvid.brodin@alten.se>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/hsr/hsr_device.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -94,9 +94,8 @@ static void hsr_check_announce(struct ne
+ && (old_operstate != IF_OPER_UP)) {
+ /* Went up */
+ hsr->announce_count = 0;
+- hsr->announce_timer.expires = jiffies +
+- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+- add_timer(&hsr->announce_timer);
++ mod_timer(&hsr->announce_timer,
++ jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
+ }
+
+ if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
+@@ -332,6 +331,7 @@ static void hsr_announce(unsigned long d
+ {
+ struct hsr_priv *hsr;
+ struct hsr_port *master;
++ unsigned long interval;
+
+ hsr = (struct hsr_priv *) data;
+
+@@ -343,18 +343,16 @@ static void hsr_announce(unsigned long d
+ hsr->protVersion);
+ hsr->announce_count++;
+
+- hsr->announce_timer.expires = jiffies +
+- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
++ interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+ } else {
+ send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
+ hsr->protVersion);
+
+- hsr->announce_timer.expires = jiffies +
+- msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
++ interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+ }
+
+ if (is_admin_up(master->dev))
+- add_timer(&hsr->announce_timer);
++ mod_timer(&hsr->announce_timer, jiffies + interval);
+
+ rcu_read_unlock();
+ }
diff --git a/patches.suse/net-mlx5e-Don-t-overwrite-pedit-action-when-multiple.patch b/patches.suse/net-mlx5e-Don-t-overwrite-pedit-action-when-multiple.patch
new file mode 100644
index 0000000000..8ce9a12d09
--- /dev/null
+++ b/patches.suse/net-mlx5e-Don-t-overwrite-pedit-action-when-multiple.patch
@@ -0,0 +1,97 @@
+From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
+Date: Mon, 28 Jan 2019 15:28:06 -0800
+Subject: net/mlx5e: Don't overwrite pedit action when multiple pedit used
+Git-commit: 218d05ce326f9e1b40a56085431fa1068b43d5d9
+Patch-mainline: v5.0-rc6
+References: networking-stable-19_02_24
+
+In some cases, we may use multiple pedit actions to modify packets.
+The command shown as below: the last pedit action is effective.
+
+$ tc filter add dev netdev_rep parent ffff: protocol ip prio 1 \
+ flower skip_sw ip_proto icmp dst_ip 3.3.3.3 \
+ action pedit ex munge ip dst set 192.168.1.100 pipe \
+ action pedit ex munge eth src set 00:00:00:00:00:01 pipe \
+ action pedit ex munge eth dst set 00:00:00:00:00:02 pipe \
+ action csum ip pipe \
+ action tunnel_key set src_ip 1.1.1.100 dst_ip 1.1.1.200 dst_port 4789 id 100 \
+ action mirred egress redirect dev vxlan0
+
+To fix it, we add max_mod_hdr_actions to the mlx5e_tc_flow_parse_attr structure;
+max_mod_hdr_actions will store the max pedit action number we support and
+num_mod_hdr_actions indicates how many pedit actions we used, and store all
+pedit actions to mod_hdr_actions.
+
+Fixes: d79b6df6b10a ("net/mlx5e: Add parsing of TC pedit actions to HW format")
+Cc: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
+Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
+Acked-by: Saeed Mahameed <saeedm@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 25 ++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -81,6 +81,7 @@ struct mlx5e_tc_flow_parse_attr {
+ struct ip_tunnel_info tun_info;
+ struct mlx5_flow_spec spec;
+ int num_mod_hdr_actions;
++ int max_mod_hdr_actions;
+ void *mod_hdr_actions;
+ int mirred_ifindex;
+ };
+@@ -1125,9 +1126,9 @@ static struct mlx5_fields fields[] = {
+ OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
+ };
+
+-/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
+- * max from the SW pedit action. On success, it says how many HW actions were
+- * actually parsed.
++/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
++ * max from the SW pedit action. On success, attr->num_mod_hdr_actions
++ * says how many HW actions were actually parsed.
+ */
+ static int offload_pedit_fields(struct pedit_headers *masks,
+ struct pedit_headers *vals,
+@@ -1150,9 +1151,11 @@ static int offload_pedit_fields(struct p
+ add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
+
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+- action = parse_attr->mod_hdr_actions;
+- max_actions = parse_attr->num_mod_hdr_actions;
+- nactions = 0;
++ action = parse_attr->mod_hdr_actions +
++ parse_attr->num_mod_hdr_actions * action_size;
++
++ max_actions = parse_attr->max_mod_hdr_actions;
++ nactions = parse_attr->num_mod_hdr_actions;
+
+ for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ f = &fields[i];
+@@ -1257,7 +1260,7 @@ static int alloc_mod_hdr_actions(struct
+ if (!parse_attr->mod_hdr_actions)
+ return -ENOMEM;
+
+- parse_attr->num_mod_hdr_actions = max_actions;
++ parse_attr->max_mod_hdr_actions = max_actions;
+ return 0;
+ }
+
+@@ -1301,9 +1304,11 @@ static int parse_tc_pedit_action(struct
+ goto out_err;
+ }
+
+- err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
+- if (err)
+- goto out_err;
++ if (!parse_attr->mod_hdr_actions) {
++ err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
++ if (err)
++ goto out_err;
++ }
+
+ err = offload_pedit_fields(masks, vals, parse_attr);
+ if (err < 0)
diff --git a/patches.suse/net-nfc-Fix-NULL-dereference-on-nfc_llcp_build_tlv-f.patch b/patches.suse/net-nfc-Fix-NULL-dereference-on-nfc_llcp_build_tlv-f.patch
new file mode 100644
index 0000000000..86846d7907
--- /dev/null
+++ b/patches.suse/net-nfc-Fix-NULL-dereference-on-nfc_llcp_build_tlv-f.patch
@@ -0,0 +1,154 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Fri, 22 Feb 2019 15:37:58 +0800
+Subject: net: nfc: Fix NULL dereference on nfc_llcp_build_tlv fails
+Git-commit: 58bdd544e2933a21a51eecf17c3f5f94038261b5
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+KASAN report this:
+
+BUG: KASAN: null-ptr-deref in nfc_llcp_build_gb+0x37f/0x540 [nfc]
+Read of size 3 at addr 0000000000000000 by task syz-executor.0/5401
+
+CPU: 0 PID: 5401 Comm: syz-executor.0 Not tainted 5.0.0-rc7+ #45
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0xfa/0x1ce lib/dump_stack.c:113
+ kasan_report+0x171/0x18d mm/kasan/report.c:321
+ memcpy+0x1f/0x50 mm/kasan/common.c:130
+ nfc_llcp_build_gb+0x37f/0x540 [nfc]
+ nfc_llcp_register_device+0x6eb/0xb50 [nfc]
+ nfc_register_device+0x50/0x1d0 [nfc]
+ nfcsim_device_new+0x394/0x67d [nfcsim]
+ ? 0xffffffffc1080000
+ nfcsim_init+0x6b/0x1000 [nfcsim]
+ do_one_initcall+0xfa/0x5ca init/main.c:887
+ do_init_module+0x204/0x5f6 kernel/module.c:3460
+ load_module+0x66b2/0x8570 kernel/module.c:3808
+ __do_sys_finit_module+0x238/0x2a0 kernel/module.c:3902
+ do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x462e99
+Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007f9cb79dcc58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
+RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
+RDX: 0000000000000000 RSI: 0000000020000280 RDI: 0000000000000003
+RBP: 00007f9cb79dcc70 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007f9cb79dd6bc
+R13: 00000000004bcefb R14: 00000000006f7030 R15: 0000000000000004
+
+nfc_llcp_build_tlv will return NULL on failure; the caller should check it,
+otherwise it will trigger a NULL dereference.
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Fixes: eda21f16a5ed ("NFC: Set MIU and RW values from CONNECT and CC LLCP frames")
+Fixes: d646960f7986 ("NFC: Initial LLCP support")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/nfc/llcp_commands.c | 20 ++++++++++++++++++++
+ net/nfc/llcp_core.c | 24 ++++++++++++++++++++----
+ 2 files changed, 40 insertions(+), 4 deletions(-)
+
+--- a/net/nfc/llcp_commands.c
++++ b/net/nfc/llcp_commands.c
+@@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llc
+ sock->service_name,
+ sock->service_name_len,
+ &service_name_tlv_length);
++ if (!service_name_tlv) {
++ err = -ENOMEM;
++ goto error_tlv;
++ }
+ size += service_name_tlv_length;
+ }
+
+@@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llc
+
+ miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+ &miux_tlv_length);
++ if (!miux_tlv) {
++ err = -ENOMEM;
++ goto error_tlv;
++ }
+ size += miux_tlv_length;
+
+ rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
++ if (!rw_tlv) {
++ err = -ENOMEM;
++ goto error_tlv;
++ }
+ size += rw_tlv_length;
+
+ pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
+@@ -484,9 +496,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_soc
+
+ miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+ &miux_tlv_length);
++ if (!miux_tlv) {
++ err = -ENOMEM;
++ goto error_tlv;
++ }
+ size += miux_tlv_length;
+
+ rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
++ if (!rw_tlv) {
++ err = -ENOMEM;
++ goto error_tlv;
++ }
+ size += rw_tlv_length;
+
+ skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(stru
+
+ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
+ {
+- u8 *gb_cur, *version_tlv, version, version_length;
+- u8 *lto_tlv, lto_length;
+- u8 *wks_tlv, wks_length;
+- u8 *miux_tlv, miux_length;
++ u8 *gb_cur, version, version_length;
++ u8 lto_length, wks_length, miux_length;
++ u8 *version_tlv = NULL, *lto_tlv = NULL,
++ *wks_tlv = NULL, *miux_tlv = NULL;
+ __be16 wks = cpu_to_be16(local->local_wks);
+ u8 gb_len = 0;
+ int ret = 0;
+@@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_
+ version = LLCP_VERSION_11;
+ version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
+ 1, &version_length);
++ if (!version_tlv) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ gb_len += version_length;
+
+ lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
++ if (!lto_tlv) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ gb_len += lto_length;
+
+ pr_debug("Local wks 0x%lx\n", local->local_wks);
+ wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
++ if (!wks_tlv) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ gb_len += wks_length;
+
+ miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
+ &miux_length);
++ if (!miux_tlv) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ gb_len += miux_length;
+
+ gb_len += ARRAY_SIZE(llcp_magic);
diff --git a/patches.suse/net-packet-Set-__GFP_NOWARN-upon-allocation-in-alloc.patch b/patches.suse/net-packet-Set-__GFP_NOWARN-upon-allocation-in-alloc.patch
new file mode 100644
index 0000000000..e91ddbd661
--- /dev/null
+++ b/patches.suse/net-packet-Set-__GFP_NOWARN-upon-allocation-in-alloc.patch
@@ -0,0 +1,79 @@
+From: Christoph Paasch <cpaasch@apple.com>
+Date: Mon, 18 Mar 2019 23:14:52 -0700
+Subject: net/packet: Set __GFP_NOWARN upon allocation in alloc_pg_vec
+Git-commit: 398f0132c14754fcd03c1c4f8e7176d001ce8ea1
+Patch-mainline: v5.1-rc3
+References: git-fixes
+
+Since commit fc62814d690c ("net/packet: fix 4gb buffer limit due to overflow check")
+one can now allocate packet ring buffers >= UINT_MAX. However, syzkaller
+found that that triggers a warning:
+
+[ 21.100000] WARNING: CPU: 2 PID: 2075 at mm/page_alloc.c:4584 __alloc_pages_nod0
+[ 21.101490] Modules linked in:
+[ 21.101921] CPU: 2 PID: 2075 Comm: syz-executor.0 Not tainted 5.0.0 #146
+[ 21.102784] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 0.5.1 01/01/2011
+[ 21.103887] RIP: 0010:__alloc_pages_nodemask+0x2a0/0x630
+[ 21.104640] Code: fe ff ff 65 48 8b 04 25 c0 de 01 00 48 05 90 0f 00 00 41 bd 01 00 00 00 48 89 44 24 48 e9 9c fe 3
+[ 21.107121] RSP: 0018:ffff88805e1cf920 EFLAGS: 00010246
+[ 21.107819] RAX: 0000000000000000 RBX: ffffffff85a488a0 RCX: 0000000000000000
+[ 21.108753] RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000000
+[ 21.109699] RBP: 1ffff1100bc39f28 R08: ffffed100bcefb67 R09: ffffed100bcefb67
+[ 21.110646] R10: 0000000000000001 R11: ffffed100bcefb66 R12: 000000000000000d
+[ 21.111623] R13: 0000000000000000 R14: ffff88805e77d888 R15: 000000000000000d
+[ 21.112552] FS: 00007f7c7de05700(0000) GS:ffff88806d100000(0000) knlGS:0000000000000000
+[ 21.113612] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 21.114405] CR2: 000000000065c000 CR3: 000000005e58e006 CR4: 00000000001606e0
+[ 21.115367] Call Trace:
+[ 21.115705] ? __alloc_pages_slowpath+0x21c0/0x21c0
+[ 21.116362] alloc_pages_current+0xac/0x1e0
+[ 21.116923] kmalloc_order+0x18/0x70
+[ 21.117393] kmalloc_order_trace+0x18/0x110
+[ 21.117949] packet_set_ring+0x9d5/0x1770
+[ 21.118524] ? packet_rcv_spkt+0x440/0x440
+[ 21.119094] ? lock_downgrade+0x620/0x620
+[ 21.119646] ? __might_fault+0x177/0x1b0
+[ 21.120177] packet_setsockopt+0x981/0x2940
+[ 21.120753] ? __fget+0x2fb/0x4b0
+[ 21.121209] ? packet_release+0xab0/0xab0
+[ 21.121740] ? sock_has_perm+0x1cd/0x260
+[ 21.122297] ? selinux_secmark_relabel_packet+0xd0/0xd0
+[ 21.123013] ? __fget+0x324/0x4b0
+[ 21.123451] ? selinux_netlbl_socket_setsockopt+0x101/0x320
+[ 21.124186] ? selinux_netlbl_sock_rcv_skb+0x3a0/0x3a0
+[ 21.124908] ? __lock_acquire+0x529/0x3200
+[ 21.125453] ? selinux_socket_setsockopt+0x5d/0x70
+[ 21.126075] ? __sys_setsockopt+0x131/0x210
+[ 21.126533] ? packet_release+0xab0/0xab0
+[ 21.127004] __sys_setsockopt+0x131/0x210
+[ 21.127449] ? kernel_accept+0x2f0/0x2f0
+[ 21.127911] ? ret_from_fork+0x8/0x50
+[ 21.128313] ? do_raw_spin_lock+0x11b/0x280
+[ 21.128800] __x64_sys_setsockopt+0xba/0x150
+[ 21.129271] ? lockdep_hardirqs_on+0x37f/0x560
+[ 21.129769] do_syscall_64+0x9f/0x450
+[ 21.130182] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+We should allocate with __GFP_NOWARN to handle this.
+
+Cc: Kal Conley <kal.conley@dectris.com>
+Cc: Andrey Konovalov <andreyknvl@google.com>
+Fixes: fc62814d690c ("net/packet: fix 4gb buffer limit due to overflow check")
+Signed-off-by: Christoph Paasch <cpaasch@apple.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/packet/af_packet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4233,7 +4233,7 @@ static struct pgv *alloc_pg_vec(struct t
+ struct pgv *pg_vec;
+ int i;
+
+- pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
++ pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
+ if (unlikely(!pg_vec))
+ goto out;
+
diff --git a/patches.suse/net-packet-fix-4gb-buffer-limit-due-to-overflow-chec.patch b/patches.suse/net-packet-fix-4gb-buffer-limit-due-to-overflow-chec.patch
new file mode 100644
index 0000000000..d9817d7924
--- /dev/null
+++ b/patches.suse/net-packet-fix-4gb-buffer-limit-due-to-overflow-chec.patch
@@ -0,0 +1,37 @@
+From: Kal Conley <kal.conley@dectris.com>
+Date: Sun, 10 Feb 2019 09:57:11 +0100
+Subject: net/packet: fix 4gb buffer limit due to overflow check
+Git-commit: fc62814d690cf62189854464f4bd07457d5e9e50
+Patch-mainline: v5.0-rc7
+References: networking-stable-19_02_24
+
+When calculating rb->frames_per_block * req->tp_block_nr the result
+can overflow. Check it for overflow without limiting the total buffer
+size to UINT_MAX.
+
+This change fixes support for packet ring buffers >= UINT_MAX.
+
+Fixes: 8f8d28e4d6d8 ("net/packet: fix overflow in check for tp_frame_nr")
+Signed-off-by: Kal Conley <kal.conley@dectris.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/packet/af_packet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 3b1a78906bc0..1cd1d83a4be0 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4292,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
+ if (unlikely(rb->frames_per_block == 0))
+ goto out;
+- if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
++ if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
+ goto out;
+ if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
+ req->tp_frame_nr))
+--
+2.21.0
+
diff --git a/patches.suse/net-sit-fix-UBSAN-Undefined-behaviour-in-check_6rd.patch b/patches.suse/net-sit-fix-UBSAN-Undefined-behaviour-in-check_6rd.patch
new file mode 100644
index 0000000000..372cc164cf
--- /dev/null
+++ b/patches.suse/net-sit-fix-UBSAN-Undefined-behaviour-in-check_6rd.patch
@@ -0,0 +1,74 @@
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Mon, 11 Mar 2019 16:29:32 +0800
+Subject: net: sit: fix UBSAN Undefined behaviour in check_6rd
+Git-commit: a843dc4ebaecd15fca1f4d35a97210f72ea1473b
+Patch-mainline: v5.1-rc1
+References: networking-stable-19_03_15
+
+In func check_6rd, tunnel->ip6rd.relay_prefixlen may equal
+32, so UBSAN complains about it.
+
+UBSAN: Undefined behaviour in net/ipv6/sit.c:781:47
+shift exponent 32 is too large for 32-bit type 'unsigned int'
+CPU: 6 PID: 20036 Comm: syz-executor.0 Not tainted 4.19.27 #2
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1
+04/01/2014
+Call Trace:
+__dump_stack lib/dump_stack.c:77 [inline]
+dump_stack+0xca/0x13e lib/dump_stack.c:113
+ubsan_epilogue+0xe/0x81 lib/ubsan.c:159
+__ubsan_handle_shift_out_of_bounds+0x293/0x2e8 lib/ubsan.c:425
+check_6rd.constprop.9+0x433/0x4e0 net/ipv6/sit.c:781
+try_6rd net/ipv6/sit.c:806 [inline]
+ipip6_tunnel_xmit net/ipv6/sit.c:866 [inline]
+sit_tunnel_xmit+0x141c/0x2720 net/ipv6/sit.c:1033
+__netdev_start_xmit include/linux/netdevice.h:4300 [inline]
+netdev_start_xmit include/linux/netdevice.h:4309 [inline]
+xmit_one net/core/dev.c:3243 [inline]
+dev_hard_start_xmit+0x17c/0x780 net/core/dev.c:3259
+__dev_queue_xmit+0x1656/0x2500 net/core/dev.c:3829
+neigh_output include/net/neighbour.h:501 [inline]
+ip6_finish_output2+0xa36/0x2290 net/ipv6/ip6_output.c:120
+ip6_finish_output+0x3e7/0xa20 net/ipv6/ip6_output.c:154
+NF_HOOK_COND include/linux/netfilter.h:278 [inline]
+ip6_output+0x1e2/0x720 net/ipv6/ip6_output.c:171
+dst_output include/net/dst.h:444 [inline]
+ip6_local_out+0x99/0x170 net/ipv6/output_core.c:176
+ip6_send_skb+0x9d/0x2f0 net/ipv6/ip6_output.c:1697
+ip6_push_pending_frames+0xc0/0x100 net/ipv6/ip6_output.c:1717
+rawv6_push_pending_frames net/ipv6/raw.c:616 [inline]
+rawv6_sendmsg+0x2435/0x3530 net/ipv6/raw.c:946
+inet_sendmsg+0xf8/0x5c0 net/ipv4/af_inet.c:798
+sock_sendmsg_nosec net/socket.c:621 [inline]
+sock_sendmsg+0xc8/0x110 net/socket.c:631
+___sys_sendmsg+0x6cf/0x890 net/socket.c:2114
+__sys_sendmsg+0xf0/0x1b0 net/socket.c:2152
+do_syscall_64+0xc8/0x580 arch/x86/entry/common.c:290
+entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Signed-off-by: linmiaohe <linmiaohe@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv6/sit.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 09e440e8dfae..07e21a82ce4c 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -778,8 +778,9 @@ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
+ pbw0 = tunnel->ip6rd.prefixlen >> 5;
+ pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
+
+- d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
+- tunnel->ip6rd.relay_prefixlen;
++ d = tunnel->ip6rd.relay_prefixlen < 32 ?
++ (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
++ tunnel->ip6rd.relay_prefixlen : 0;
+
+ pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
+ if (pbi1 > 0)
+--
+2.21.0
+
diff --git a/patches.suse/net-sit-fix-memory-leak-in-sit_init_net.patch b/patches.suse/net-sit-fix-memory-leak-in-sit_init_net.patch
new file mode 100644
index 0000000000..e4a93c51d5
--- /dev/null
+++ b/patches.suse/net-sit-fix-memory-leak-in-sit_init_net.patch
@@ -0,0 +1,51 @@
+From: Mao Wenan <maowenan@huawei.com>
+Date: Fri, 1 Mar 2019 23:06:40 +0800
+Subject: net: sit: fix memory leak in sit_init_net()
+Git-commit: 07f12b26e21ab359261bf75cfcb424fdc7daeb6d
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+If register_netdev() is failed to register sitn->fb_tunnel_dev,
+it will go to err_reg_dev and forget to free netdev(sitn->fb_tunnel_dev).
+
+BUG: memory leak
+unreferenced object 0xffff888378daad00 (size 512):
+ comm "syz-executor.1", pid 4006, jiffies 4295121142 (age 16.115s)
+ hex dump (first 32 bytes):
+ 00 e6 ed c0 83 88 ff ff 00 00 00 00 00 00 00 00 ................
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+backtrace:
+ [<00000000d6dcb63e>] kvmalloc include/linux/mm.h:577 [inline]
+ [<00000000d6dcb63e>] kvzalloc include/linux/mm.h:585 [inline]
+ [<00000000d6dcb63e>] netif_alloc_netdev_queues net/core/dev.c:8380 [inline]
+ [<00000000d6dcb63e>] alloc_netdev_mqs+0x600/0xcc0 net/core/dev.c:8970
+ [<00000000867e172f>] sit_init_net+0x295/0xa40 net/ipv6/sit.c:1848
+ [<00000000871019fa>] ops_init+0xad/0x3e0 net/core/net_namespace.c:129
+ [<00000000319507f6>] setup_net+0x2ba/0x690 net/core/net_namespace.c:314
+ [<0000000087db4f96>] copy_net_ns+0x1dc/0x330 net/core/net_namespace.c:437
+ [<0000000057efc651>] create_new_namespaces+0x382/0x730 kernel/nsproxy.c:107
+ [<00000000676f83de>] copy_namespaces+0x2ed/0x3d0 kernel/nsproxy.c:165
+ [<0000000030b74bac>] copy_process.part.27+0x231e/0x6db0 kernel/fork.c:1919
+ [<00000000fff78746>] copy_process kernel/fork.c:1713 [inline]
+ [<00000000fff78746>] _do_fork+0x1bc/0xe90 kernel/fork.c:2224
+ [<000000001c2e0d1c>] do_syscall_64+0xc8/0x580 arch/x86/entry/common.c:290
+ [<00000000ec48bd44>] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+ [<0000000039acff8a>] 0xffffffffffffffff
+
+Signed-off-by: Mao Wenan <maowenan@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv6/sit.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1850,6 +1850,7 @@ static int __net_init sit_init_net(struc
+
+ err_reg_dev:
+ ipip6_dev_free(sitn->fb_tunnel_dev);
++ free_netdev(sitn->fb_tunnel_dev);
+ err_alloc_dev:
+ return err;
+ }
diff --git a/patches.suse/net-socket-set-sock-sk-to-NULL-after-calling-proto_o.patch b/patches.suse/net-socket-set-sock-sk-to-NULL-after-calling-proto_o.patch
new file mode 100644
index 0000000000..96b31c5b78
--- /dev/null
+++ b/patches.suse/net-socket-set-sock-sk-to-NULL-after-calling-proto_o.patch
@@ -0,0 +1,83 @@
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 21 Feb 2019 14:13:56 -0800
+Subject: net: socket: set sock->sk to NULL after calling proto_ops::release()
+Git-commit: ff7b11aa481f682e0e9711abfeb7d03f5cd612bf
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+Commit 9060cb719e61 ("net: crypto set sk to NULL when af_alg_release.")
+fixed a use-after-free in sockfs_setattr() when an AF_ALG socket is
+closed concurrently with fchownat(). However, it ignored that many
+other proto_ops::release() methods don't set sock->sk to NULL and
+therefore allow the same use-after-free:
+
+ - base_sock_release
+ - bnep_sock_release
+ - cmtp_sock_release
+ - data_sock_release
+ - dn_release
+ - hci_sock_release
+ - hidp_sock_release
+ - iucv_sock_release
+ - l2cap_sock_release
+ - llcp_sock_release
+ - llc_ui_release
+ - rawsock_release
+ - rfcomm_sock_release
+ - sco_sock_release
+ - svc_release
+ - vcc_release
+ - x25_release
+
+Rather than fixing all these and relying on every socket type to get
+this right forever, just make __sock_release() set sock->sk to NULL
+itself after calling proto_ops::release().
+
+Reproducer that produces the KASAN splat when any of these socket types
+are configured into the kernel:
+
+ #include <pthread.h>
+ #include <stdlib.h>
+ #include <sys/socket.h>
+ #include <unistd.h>
+
+ pthread_t t;
+ volatile int fd;
+
+ void *close_thread(void *arg)
+ {
+ for (;;) {
+ usleep(rand() % 100);
+ close(fd);
+ }
+ }
+
+ int main()
+ {
+ pthread_create(&t, NULL, close_thread, NULL);
+ for (;;) {
+ fd = socket(rand() % 50, rand() % 11, 0);
+ fchownat(fd, "", 1000, 1000, 0x1000);
+ close(fd);
+ }
+ }
+
+Fixes: 86741ec25462 ("net: core: Add a UID field to struct sock.")
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/socket.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -600,6 +600,7 @@ static void __sock_release(struct socket
+ if (inode)
+ inode_lock(inode);
+ sock->ops->release(sock);
++ sock->sk = NULL;
+ if (inode)
+ inode_unlock(inode);
+ sock->ops = NULL;
diff --git a/patches.suse/net-validate-untrusted-gso-packets-without-csum-offl.patch b/patches.suse/net-validate-untrusted-gso-packets-without-csum-offl.patch
new file mode 100644
index 0000000000..f3d2cf06ed
--- /dev/null
+++ b/patches.suse/net-validate-untrusted-gso-packets-without-csum-offl.patch
@@ -0,0 +1,62 @@
+From: Willem de Bruijn <willemb@google.com>
+Date: Fri, 15 Feb 2019 12:15:47 -0500
+Subject: net: validate untrusted gso packets without csum offload
+Git-commit: d5be7f632bad0f489879eed0ff4b99bd7fe0b74c
+Patch-mainline: v5.0-rc8
+References: networking-stable-19_02_20
+
+Syzkaller again found a path to a kernel crash through bad gso input.
+By building an excessively large packet to cause an skb field to wrap.
+
+If VIRTIO_NET_HDR_F_NEEDS_CSUM was set this would have been dropped in
+skb_partial_csum_set.
+
+GSO packets that do not set checksum offload are suspicious and rare.
+Most callers of virtio_net_hdr_to_skb already pass them to
+skb_probe_transport_header.
+
+Move that test forward, change it to detect parse failure and drop
+packets on failure as those clearly are not one of the legitimate
+VIRTIO_NET_HDR_GSO types.
+
+Fixes: bfd5f4a3d605 ("packet: Add GSO/csum offload support.")
+Fixes: f43798c27684 ("tun: Allow GSO using virtio_net_hdr")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/linux/skbuff.h | 2 +-
+ include/linux/virtio_net.h | 9 +++++++++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2239,7 +2239,7 @@ static inline void skb_probe_transport_h
+ return;
+ else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
+ skb_set_transport_header(skb, keys.control.thoff);
+- else
++ else if (offset_hint >= 0)
+ skb_set_transport_header(skb, offset_hint);
+ }
+
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -56,6 +56,15 @@ static inline int virtio_net_hdr_to_skb(
+
+ if (!skb_partial_csum_set(skb, start, off))
+ return -EINVAL;
++ } else {
++ /* gso packets without NEEDS_CSUM do not set transport_offset.
++ * probe and drop if does not match one of the above types.
++ */
++ if (gso_type) {
++ skb_probe_transport_header(skb, -1);
++ if (!skb_transport_header_was_set(skb))
++ return -EINVAL;
++ }
+ }
+
+ if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
diff --git a/patches.suse/net-x25-fix-a-race-in-x25_bind.patch b/patches.suse/net-x25-fix-a-race-in-x25_bind.patch
new file mode 100644
index 0000000000..2e51e04c4b
--- /dev/null
+++ b/patches.suse/net-x25-fix-a-race-in-x25_bind.patch
@@ -0,0 +1,136 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 23 Feb 2019 13:24:59 -0800
+Subject: net/x25: fix a race in x25_bind()
+Git-commit: 797a22bd5298c2674d927893f46cadf619dad11d
+Patch-mainline: v5.0-rc8
+References: networking-stable-19_03_15
+
+syzbot was able to trigger another soft lockup [1]
+
+I first thought it was the O(N^2) issue I mentioned in my
+prior fix (f657d22ee1f "net/x25: do not hold the cpu
+too long in x25_new_lci()"), but I eventually found
+that x25_bind() was not checking SOCK_ZAPPED state under
+socket lock protection.
+
+This means that multiple threads can end up calling
+x25_insert_socket() for the same socket, and corrupt x25_list
+
+[1]
+watchdog: BUG: soft lockup - CPU#0 stuck for 123s! [syz-executor.2:10492]
+Modules linked in:
+irq event stamp: 27515
+hardirqs last enabled at (27514): [<ffffffff81006673>] trace_hardirqs_on_thunk+0x1a/0x1c
+hardirqs last disabled at (27515): [<ffffffff8100668f>] trace_hardirqs_off_thunk+0x1a/0x1c
+softirqs last enabled at (32): [<ffffffff8632ee73>] x25_get_neigh+0xa3/0xd0 net/x25/x25_link.c:336
+softirqs last disabled at (34): [<ffffffff86324bc3>] x25_find_socket+0x23/0x140 net/x25/af_x25.c:341
+CPU: 0 PID: 10492 Comm: syz-executor.2 Not tainted 5.0.0-rc7+ #88
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:__sanitizer_cov_trace_pc+0x4/0x50 kernel/kcov.c:97
+Code: f4 ff ff ff e8 11 9f ea ff 48 c7 05 12 fb e5 08 00 00 00 00 e9 c8 e9 ff ff 90 90 90 90 90 90 90 90 90 90 90 90 90 55 48 89 e5 <48> 8b 75 08 65 48 8b 04 25 40 ee 01 00 65 8b 15 38 0c 92 7e 81 e2
+RSP: 0018:ffff88806e94fc48 EFLAGS: 00000286 ORIG_RAX: ffffffffffffff13
+RAX: 1ffff1100d84dac5 RBX: 0000000000000001 RCX: ffffc90006197000
+RDX: 0000000000040000 RSI: ffffffff86324bf3 RDI: ffff88806c26d628
+RBP: ffff88806e94fc48 R08: ffff88806c1c6500 R09: fffffbfff1282561
+R10: fffffbfff1282560 R11: ffffffff89412b03 R12: ffff88806c26d628
+R13: ffff888090455200 R14: dffffc0000000000 R15: 0000000000000000
+FS: 00007f3a107e4700(0000) GS:ffff8880ae800000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f3a107e3db8 CR3: 00000000a5544000 CR4: 00000000001406f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ __x25_find_socket net/x25/af_x25.c:327 [inline]
+ x25_find_socket+0x7d/0x140 net/x25/af_x25.c:342
+ x25_new_lci net/x25/af_x25.c:355 [inline]
+ x25_connect+0x380/0xde0 net/x25/af_x25.c:784
+ __sys_connect+0x266/0x330 net/socket.c:1662
+ __do_sys_connect net/socket.c:1673 [inline]
+ __se_sys_connect net/socket.c:1670 [inline]
+ __x64_sys_connect+0x73/0xb0 net/socket.c:1670
+ do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x457e29
+Code: ad b8 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 7b b8 fb ff c3 66 2e 0f 1f 84 00 00 00 00
+RSP: 002b:00007f3a107e3c78 EFLAGS: 00000246 ORIG_RAX: 000000000000002a
+RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000000457e29
+RDX: 0000000000000012 RSI: 0000000020000200 RDI: 0000000000000005
+RBP: 000000000073c040 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007f3a107e46d4
+R13: 00000000004be362 R14: 00000000004ceb98 R15: 00000000ffffffff
+Sending NMI from CPU 0 to CPUs 1:
+NMI backtrace for cpu 1
+CPU: 1 PID: 10493 Comm: syz-executor.3 Not tainted 5.0.0-rc7+ #88
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:__read_once_size include/linux/compiler.h:193 [inline]
+RIP: 0010:queued_write_lock_slowpath+0x143/0x290 kernel/locking/qrwlock.c:86
+Code: 4c 8d 2c 01 41 83 c7 03 41 0f b6 45 00 41 38 c7 7c 08 84 c0 0f 85 0c 01 00 00 8b 03 3d 00 01 00 00 74 1a f3 90 41 0f b6 55 00 <41> 38 d7 7c eb 84 d2 74 e7 48 89 df e8 cc aa 4e 00 eb dd be 04 00
+RSP: 0018:ffff888085c47bd8 EFLAGS: 00000206
+RAX: 0000000000000300 RBX: ffffffff89412b00 RCX: 1ffffffff1282560
+RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffffffff89412b00
+RBP: ffff888085c47c70 R08: 1ffffffff1282560 R09: fffffbfff1282561
+R10: fffffbfff1282560 R11: ffffffff89412b03 R12: 00000000000000ff
+R13: fffffbfff1282560 R14: 1ffff11010b88f7d R15: 0000000000000003
+FS: 00007fdd04086700(0000) GS:ffff8880ae900000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fdd04064db8 CR3: 0000000090be0000 CR4: 00000000001406e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ queued_write_lock include/asm-generic/qrwlock.h:104 [inline]
+ do_raw_write_lock+0x1d6/0x290 kernel/locking/spinlock_debug.c:203
+ __raw_write_lock_bh include/linux/rwlock_api_smp.h:204 [inline]
+ _raw_write_lock_bh+0x3b/0x50 kernel/locking/spinlock.c:312
+ x25_insert_socket+0x21/0xe0 net/x25/af_x25.c:267
+ x25_bind+0x273/0x340 net/x25/af_x25.c:703
+ __sys_bind+0x23f/0x290 net/socket.c:1481
+ __do_sys_bind net/socket.c:1492 [inline]
+ __se_sys_bind net/socket.c:1490 [inline]
+ __x64_sys_bind+0x73/0xb0 net/socket.c:1490
+ do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x457e29
+
+Fixes: 90c27297a9bf ("X.25 remove bkl in bind")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: andrew hendry <andrew.hendry@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/x25/af_x25.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index ec3a828672ef..eff31348e20b 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -679,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
+ int len, i, rc = 0;
+
+- if (!sock_flag(sk, SOCK_ZAPPED) ||
+- addr_len != sizeof(struct sockaddr_x25) ||
++ if (addr_len != sizeof(struct sockaddr_x25) ||
+ addr->sx25_family != AF_X25) {
+ rc = -EINVAL;
+ goto out;
+@@ -699,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ }
+
+ lock_sock(sk);
+- x25_sk(sk)->source_addr = addr->sx25_addr;
+- x25_insert_socket(sk);
+- sock_reset_flag(sk, SOCK_ZAPPED);
++ if (sock_flag(sk, SOCK_ZAPPED)) {
++ x25_sk(sk)->source_addr = addr->sx25_addr;
++ x25_insert_socket(sk);
++ sock_reset_flag(sk, SOCK_ZAPPED);
++ } else {
++ rc = -EINVAL;
++ }
+ release_sock(sk);
+ SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
+ out:
+--
+2.21.0
+
diff --git a/patches.suse/net-x25-fix-use-after-free-in-x25_device_event.patch b/patches.suse/net-x25-fix-use-after-free-in-x25_device_event.patch
new file mode 100644
index 0000000000..37d4d1b9b0
--- /dev/null
+++ b/patches.suse/net-x25-fix-use-after-free-in-x25_device_event.patch
@@ -0,0 +1,148 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 10 Mar 2019 09:07:14 -0700
+Subject: net/x25: fix use-after-free in x25_device_event()
+Git-commit: 95d6ebd53c79522bf9502dbc7e89e0d63f94dae4
+Patch-mainline: v5.1-rc1
+References: networking-stable-19_03_15
+
+In case of failure x25_connect() does a x25_neigh_put(x25->neighbour)
+but forgets to clear x25->neighbour pointer, thus triggering use-after-free.
+
+Since the socket is visible in x25_list, we need to hold x25_list_lock
+to protect the operation.
+
+syzbot report :
+
+BUG: KASAN: use-after-free in x25_kill_by_device net/x25/af_x25.c:217 [inline]
+BUG: KASAN: use-after-free in x25_device_event+0x296/0x2b0 net/x25/af_x25.c:252
+Read of size 8 at addr ffff8880a030edd0 by task syz-executor003/7854
+
+CPU: 0 PID: 7854 Comm: syz-executor003 Not tainted 5.0.0+ #97
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x172/0x1f0 lib/dump_stack.c:113
+ print_address_description.cold+0x7c/0x20d mm/kasan/report.c:187
+ kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
+ __asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:135
+ x25_kill_by_device net/x25/af_x25.c:217 [inline]
+ x25_device_event+0x296/0x2b0 net/x25/af_x25.c:252
+ notifier_call_chain+0xc7/0x240 kernel/notifier.c:93
+ __raw_notifier_call_chain kernel/notifier.c:394 [inline]
+ raw_notifier_call_chain+0x2e/0x40 kernel/notifier.c:401
+ call_netdevice_notifiers_info+0x3f/0x90 net/core/dev.c:1739
+ call_netdevice_notifiers_extack net/core/dev.c:1751 [inline]
+ call_netdevice_notifiers net/core/dev.c:1765 [inline]
+ __dev_notify_flags+0x1e9/0x2c0 net/core/dev.c:7607
+ dev_change_flags+0x10d/0x170 net/core/dev.c:7643
+ dev_ifsioc+0x2b0/0x940 net/core/dev_ioctl.c:237
+ dev_ioctl+0x1b8/0xc70 net/core/dev_ioctl.c:488
+ sock_do_ioctl+0x1bd/0x300 net/socket.c:995
+ sock_ioctl+0x32b/0x610 net/socket.c:1096
+ vfs_ioctl fs/ioctl.c:46 [inline]
+ file_ioctl fs/ioctl.c:509 [inline]
+ do_vfs_ioctl+0xd6e/0x1390 fs/ioctl.c:696
+ ksys_ioctl+0xab/0xd0 fs/ioctl.c:713
+ __do_sys_ioctl fs/ioctl.c:720 [inline]
+ __se_sys_ioctl fs/ioctl.c:718 [inline]
+ __x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:718
+ do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x4467c9
+Code: e8 0c e8 ff ff 48 83 c4 18 c3 0f 1f 80 00 00 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 5b 07 fc ff c3 66 2e 0f 1f 84 00 00 00 00
+RSP: 002b:00007fdbea222d98 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00000000006dbc58 RCX: 00000000004467c9
+RDX: 0000000020000340 RSI: 0000000000008914 RDI: 0000000000000003
+RBP: 00000000006dbc50 R08: 00007fdbea223700 R09: 0000000000000000
+R10: 00007fdbea223700 R11: 0000000000000246 R12: 00000000006dbc5c
+R13: 6000030030626669 R14: 0000000000000000 R15: 0000000030626669
+
+Allocated by task 7843:
+ save_stack+0x45/0xd0 mm/kasan/common.c:73
+ set_track mm/kasan/common.c:85 [inline]
+ __kasan_kmalloc mm/kasan/common.c:495 [inline]
+ __kasan_kmalloc.constprop.0+0xcf/0xe0 mm/kasan/common.c:468
+ kasan_kmalloc+0x9/0x10 mm/kasan/common.c:509
+ kmem_cache_alloc_trace+0x151/0x760 mm/slab.c:3615
+ kmalloc include/linux/slab.h:545 [inline]
+ x25_link_device_up+0x46/0x3f0 net/x25/x25_link.c:249
+ x25_device_event+0x116/0x2b0 net/x25/af_x25.c:242
+ notifier_call_chain+0xc7/0x240 kernel/notifier.c:93
+ __raw_notifier_call_chain kernel/notifier.c:394 [inline]
+ raw_notifier_call_chain+0x2e/0x40 kernel/notifier.c:401
+ call_netdevice_notifiers_info+0x3f/0x90 net/core/dev.c:1739
+ call_netdevice_notifiers_extack net/core/dev.c:1751 [inline]
+ call_netdevice_notifiers net/core/dev.c:1765 [inline]
+ __dev_notify_flags+0x121/0x2c0 net/core/dev.c:7605
+ dev_change_flags+0x10d/0x170 net/core/dev.c:7643
+ dev_ifsioc+0x2b0/0x940 net/core/dev_ioctl.c:237
+ dev_ioctl+0x1b8/0xc70 net/core/dev_ioctl.c:488
+ sock_do_ioctl+0x1bd/0x300 net/socket.c:995
+ sock_ioctl+0x32b/0x610 net/socket.c:1096
+ vfs_ioctl fs/ioctl.c:46 [inline]
+ file_ioctl fs/ioctl.c:509 [inline]
+ do_vfs_ioctl+0xd6e/0x1390 fs/ioctl.c:696
+ ksys_ioctl+0xab/0xd0 fs/ioctl.c:713
+ __do_sys_ioctl fs/ioctl.c:720 [inline]
+ __se_sys_ioctl fs/ioctl.c:718 [inline]
+ __x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:718
+ do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Freed by task 7865:
+ save_stack+0x45/0xd0 mm/kasan/common.c:73
+ set_track mm/kasan/common.c:85 [inline]
+ __kasan_slab_free+0x102/0x150 mm/kasan/common.c:457
+ kasan_slab_free+0xe/0x10 mm/kasan/common.c:465
+ __cache_free mm/slab.c:3494 [inline]
+ kfree+0xcf/0x230 mm/slab.c:3811
+ x25_neigh_put include/net/x25.h:253 [inline]
+ x25_connect+0x8d8/0xde0 net/x25/af_x25.c:824
+ __sys_connect+0x266/0x330 net/socket.c:1685
+ __do_sys_connect net/socket.c:1696 [inline]
+ __se_sys_connect net/socket.c:1693 [inline]
+ __x64_sys_connect+0x73/0xb0 net/socket.c:1693
+ do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+The buggy address belongs to the object at ffff8880a030edc0
+ which belongs to the cache kmalloc-256 of size 256
+The buggy address is located 16 bytes inside of
+ 256-byte region [ffff8880a030edc0, ffff8880a030eec0)
+The buggy address belongs to the page:
+page:ffffea000280c380 count:1 mapcount:0 mapping:ffff88812c3f07c0 index:0x0
+flags: 0x1fffc0000000200(slab)
+raw: 01fffc0000000200 ffffea0002806788 ffffea00027f0188 ffff88812c3f07c0
+raw: 0000000000000000 ffff8880a030e000 000000010000000c 0000000000000000
+page dumped because: kasan: bad access detected
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot+04babcefcd396fabec37@syzkaller.appspotmail.com
+Cc: andrew hendry <andrew.hendry@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/x25/af_x25.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index eff31348e20b..27171ac6fe3b 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -820,8 +820,12 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
+ sock->state = SS_CONNECTED;
+ rc = 0;
+ out_put_neigh:
+- if (rc)
++ if (rc) {
++ read_lock_bh(&x25_list_lock);
+ x25_neigh_put(x25->neighbour);
++ x25->neighbour = NULL;
++ read_unlock_bh(&x25_list_lock);
++ }
+ out_put_route:
+ x25_route_put(rt);
+ out:
+--
+2.21.0
+
diff --git a/patches.suse/net-x25-reset-state-in-x25_connect.patch b/patches.suse/net-x25-reset-state-in-x25_connect.patch
new file mode 100644
index 0000000000..f1b702ab99
--- /dev/null
+++ b/patches.suse/net-x25-reset-state-in-x25_connect.patch
@@ -0,0 +1,84 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 11 Mar 2019 13:48:44 -0700
+Subject: net/x25: reset state in x25_connect()
+Git-commit: ee74d0bd4325efb41e38affe5955f920ed973f23
+Patch-mainline: v5.1-rc1
+References: networking-stable-19_03_15
+
+In case x25_connect() fails and frees the socket neighbour,
+we also need to undo the change done to x25->state.
+
+Before my last bug fix, we had use-after-free so this
+patch fixes a latent bug.
+
+syzbot report :
+
+kasan: CONFIG_KASAN_INLINE enabled
+kasan: GPF could be caused by NULL-ptr deref or user memory access
+general protection fault: 0000 [#1] PREEMPT SMP KASAN
+CPU: 1 PID: 16137 Comm: syz-executor.1 Not tainted 5.0.0+ #117
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:x25_write_internal+0x1e8/0xdf0 net/x25/x25_subr.c:173
+Code: 00 40 88 b5 e0 fe ff ff 0f 85 01 0b 00 00 48 8b 8b 80 04 00 00 48 ba 00 00 00 00 00 fc ff df 48 8d 79 1c 48 89 fe 48 c1 ee 03 <0f> b6 34 16 48 89 fa 83 e2 07 83 c2 03 40 38 f2 7c 09 40 84 f6 0f
+RSP: 0018:ffff888076717a08 EFLAGS: 00010207
+RAX: ffff88805f2f2292 RBX: ffff8880a0ae6000 RCX: 0000000000000000
+kobject: 'loop5' (0000000018d0d0ee): kobject_uevent_env
+RDX: dffffc0000000000 RSI: 0000000000000003 RDI: 000000000000001c
+RBP: ffff888076717b40 R08: ffff8880950e0580 R09: ffffed100be5e46d
+R10: ffffed100be5e46c R11: ffff88805f2f2363 R12: ffff888065579840
+kobject: 'loop5' (0000000018d0d0ee): fill_kobj_path: path = '/devices/virtual/block/loop5'
+R13: 1ffff1100ece2f47 R14: 0000000000000013 R15: 0000000000000013
+FS: 00007fb88cf43700(0000) GS:ffff8880ae900000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f9a42a41028 CR3: 0000000087a67000 CR4: 00000000001406e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ x25_release+0xd0/0x340 net/x25/af_x25.c:658
+ __sock_release+0xd3/0x2b0 net/socket.c:579
+ sock_close+0x1b/0x30 net/socket.c:1162
+ __fput+0x2df/0x8d0 fs/file_table.c:278
+ ____fput+0x16/0x20 fs/file_table.c:309
+ task_work_run+0x14a/0x1c0 kernel/task_work.c:113
+ get_signal+0x1961/0x1d50 kernel/signal.c:2388
+ do_signal+0x87/0x1940 arch/x86/kernel/signal.c:816
+ exit_to_usermode_loop+0x244/0x2c0 arch/x86/entry/common.c:162
+ prepare_exit_to_usermode arch/x86/entry/common.c:197 [inline]
+ syscall_return_slowpath arch/x86/entry/common.c:268 [inline]
+ do_syscall_64+0x52d/0x610 arch/x86/entry/common.c:293
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x457f29
+Code: ad b8 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 7b b8 fb ff c3 66 2e 0f 1f 84 00 00 00 00
+RSP: 002b:00007fb88cf42c78 EFLAGS: 00000246 ORIG_RAX: 000000000000002a
+RAX: fffffffffffffe00 RBX: 0000000000000003 RCX: 0000000000457f29
+RDX: 0000000000000012 RSI: 0000000020000080 RDI: 0000000000000004
+RBP: 000000000073bf00 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007fb88cf436d4
+R13: 00000000004be462 R14: 00000000004cec98 R15: 00000000ffffffff
+Modules linked in:
+
+Fixes: 95d6ebd53c79 ("net/x25: fix use-after-free in x25_device_event()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: andrew hendry <andrew.hendry@gmail.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/x25/af_x25.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 27171ac6fe3b..20a511398389 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -825,6 +825,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
+ x25_neigh_put(x25->neighbour);
+ x25->neighbour = NULL;
+ read_unlock_bh(&x25_list_lock);
++ x25->state = X25_STATE_0;
+ }
+ out_put_route:
+ x25_route_put(rt);
+--
+2.21.0
+
diff --git a/patches.suse/net_sched-fix-two-more-memory-leaks-in-cls_tcindex.patch b/patches.suse/net_sched-fix-two-more-memory-leaks-in-cls_tcindex.patch
new file mode 100644
index 0000000000..672bedcb72
--- /dev/null
+++ b/patches.suse/net_sched-fix-two-more-memory-leaks-in-cls_tcindex.patch
@@ -0,0 +1,89 @@
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 11 Feb 2019 13:06:16 -0800
+Subject: net_sched: fix two more memory leaks in cls_tcindex
+Git-commit: 1db817e75f5b9387b8db11e37d5f0624eb9223e0
+Patch-mainline: v5.0-rc7
+References: networking-stable-19_02_24
+
+struct tcindex_filter_result contains two parts:
+struct tcf_exts and struct tcf_result.
+
+For the local variable 'cr', its exts part is never used but
+initialized without being released properly on success path. So
+just completely remove the exts part to fix this leak.
+
+For the local variable 'new_filter_result', it is never properly
+released if not used by 'r' on success path.
+
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/sched/cls_tcindex.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -270,9 +270,9 @@ tcindex_set_parms(struct net *net, struc
+ struct nlattr *est, bool ovr)
+ {
+ struct tcindex_filter_result new_filter_result, *old_r = r;
+- struct tcindex_filter_result cr;
+ struct tcindex_data *cp = NULL, *oldp;
+ struct tcindex_filter *f = NULL; /* make gcc behave */
++ struct tcf_result cr = {};
+ int err, balloc = 0;
+ struct tcf_exts e;
+
+@@ -313,11 +313,8 @@ tcindex_set_parms(struct net *net, struc
+ err = tcindex_filter_result_init(&new_filter_result);
+ if (err < 0)
+ goto errout1;
+- err = tcindex_filter_result_init(&cr);
+- if (err < 0)
+- goto errout1;
+ if (old_r)
+- cr.res = r->res;
++ cr = r->res;
+
+ if (tb[TCA_TCINDEX_HASH])
+ cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
+@@ -408,8 +405,8 @@ tcindex_set_parms(struct net *net, struc
+ }
+
+ if (tb[TCA_TCINDEX_CLASSID]) {
+- cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
+- tcf_bind_filter(tp, &cr.res, base);
++ cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
++ tcf_bind_filter(tp, &cr, base);
+ }
+
+ if (old_r && old_r != r) {
+@@ -421,7 +418,7 @@ tcindex_set_parms(struct net *net, struc
+ }
+
+ oldp = p;
+- r->res = cr.res;
++ r->res = cr;
+ tcf_exts_change(tp, &r->exts, &e);
+
+ rcu_assign_pointer(tp->root, cp);
+@@ -440,6 +437,8 @@ tcindex_set_parms(struct net *net, struc
+ ; /* nothing */
+
+ rcu_assign_pointer(*fp, f);
++ } else {
++ tcf_exts_destroy(&new_filter_result.exts);
+ }
+
+ if (oldp)
+@@ -452,7 +451,6 @@ errout_alloc:
+ else if (balloc == 2)
+ kfree(cp->h);
+ errout1:
+- tcf_exts_destroy(&cr.exts);
+ tcf_exts_destroy(&new_filter_result.exts);
+ errout:
+ kfree(cp);
diff --git a/patches.suse/netlabel-fix-out-of-bounds-memory-accesses.patch b/patches.suse/netlabel-fix-out-of-bounds-memory-accesses.patch
new file mode 100644
index 0000000000..0aa252340a
--- /dev/null
+++ b/patches.suse/netlabel-fix-out-of-bounds-memory-accesses.patch
@@ -0,0 +1,51 @@
+From: Paul Moore <paul@paul-moore.com>
+Date: Mon, 25 Feb 2019 19:06:06 -0500
+Subject: netlabel: fix out-of-bounds memory accesses
+Git-commit: 5578de4834fe0f2a34fedc7374be691443396d1f
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+There are two array out-of-bounds memory accesses, one in
+cipso_v4_map_lvl_valid(), the other in netlbl_bitmap_walk(). Both
+errors are embarassingly simple, and the fixes are straightforward.
+
+As a FYI for anyone backporting this patch to kernels prior to v4.8,
+you'll want to apply the netlbl_bitmap_walk() patch to
+cipso_v4_bitmap_walk() as netlbl_bitmap_walk() doesn't exist before
+Linux v4.8.
+
+Reported-by: Jann Horn <jannh@google.com>
+Fixes: 446fda4f2682 ("[NetLabel]: CIPSOv4 engine")
+Fixes: 3faa8f982f95 ("netlabel: Move bitmap manipulation functions to the NetLabel core.")
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv4/cipso_ipv4.c | 3 ++-
+ net/netlabel/netlabel_kapi.c | 3 ++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -667,7 +667,8 @@ static int cipso_v4_map_lvl_valid(const
+ case CIPSO_V4_MAP_PASS:
+ return 0;
+ case CIPSO_V4_MAP_TRANS:
+- if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
++ if ((level < doi_def->map.std->lvl.cipso_size) &&
++ (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
+ return 0;
+ break;
+ }
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -903,7 +903,8 @@ int netlbl_bitmap_walk(const unsigned ch
+ (state == 0 && (byte & bitmask) == 0))
+ return bit_spot;
+
+- bit_spot++;
++ if (++bit_spot >= bitmap_len)
++ return -1;
+ bitmask >>= 1;
+ if (bitmask == 0) {
+ byte = bitmap[++byte_offset];
diff --git a/patches.suse/qmi_wwan-Add-support-for-Quectel-EG12-EM12.patch b/patches.suse/qmi_wwan-Add-support-for-Quectel-EG12-EM12.patch
new file mode 100644
index 0000000000..ab48e5af1c
--- /dev/null
+++ b/patches.suse/qmi_wwan-Add-support-for-Quectel-EG12-EM12.patch
@@ -0,0 +1,83 @@
+From: Kristian Evensen <kristian.evensen@gmail.com>
+Date: Sat, 2 Mar 2019 13:32:26 +0100
+Subject: qmi_wwan: Add support for Quectel EG12/EM12
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Git-commit: 822e44b45eb991c63487c5e2ce7d636411870a8d
+Patch-mainline: v5.1-rc1
+References: networking-stable-19_03_07
+
+Quectel EG12 (module)/EM12 (M.2 card) is a Cat. 12 LTE modem. The modem
+behaves in the same way as the EP06, so the "set DTR"-quirk must be
+applied and the diagnostic-interface check performed. Since the
+diagnostic-check now applies to more modems, I have renamed the function
+from quectel_ep06_diag_detected() to quectel_diag_detected().
+
+Signed-off-by: Kristian Evensen <kristian.evensen@gmail.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/usb/qmi_wwan.c | 26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -976,6 +976,13 @@ static const struct usb_device_id produc
+ 0xff),
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
+ },
++ { /* Quectel EG12/EM12 */
++ USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
++ USB_CLASS_VENDOR_SPEC,
++ USB_SUBCLASS_VENDOR_SPEC,
++ 0xff),
++ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
++ },
+
+ /* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
+@@ -1343,17 +1350,20 @@ static bool quectel_ec20_detected(struct
+ return false;
+ }
+
+-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
++static bool quectel_diag_detected(struct usb_interface *intf)
+ {
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
++ u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
++ u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
+
+- if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
+- le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
+- intf_desc.bNumEndpoints == 2)
+- return true;
++ if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
++ return false;
+
+- return false;
++ if (id_product == 0x0306 || id_product == 0x0512)
++ return true;
++ else
++ return false;
+ }
+
+ static int qmi_wwan_probe(struct usb_interface *intf,
+@@ -1390,13 +1400,13 @@ static int qmi_wwan_probe(struct usb_int
+ return -ENODEV;
+ }
+
+- /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
++ /* Several Quectel modems supports dynamic interface configuration, so
+ * we need to match on class/subclass/protocol. These values are
+ * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+ * different. Ignore the current interface if the number of endpoints
+ * the number for the diag interface (two).
+ */
+- if (quectel_ep06_diag_detected(intf))
++ if (quectel_diag_detected(intf))
+ return -ENODEV;
+
+ return usbnet_probe(intf, id);
diff --git a/patches.suse/ravb-Decrease-TxFIFO-depth-of-Q3-and-Q2-to-one.patch b/patches.suse/ravb-Decrease-TxFIFO-depth-of-Q3-and-Q2-to-one.patch
new file mode 100644
index 0000000000..ae9710458a
--- /dev/null
+++ b/patches.suse/ravb-Decrease-TxFIFO-depth-of-Q3-and-Q2-to-one.patch
@@ -0,0 +1,46 @@
+From: Masaru Nagai <masaru.nagai.vx@renesas.com>
+Date: Thu, 7 Mar 2019 11:24:47 +0100
+Subject: ravb: Decrease TxFIFO depth of Q3 and Q2 to one
+Git-commit: ae9819e339b451da7a86ab6fe38ecfcb6814e78a
+Patch-mainline: v5.1-rc1
+References: networking-stable-19_03_15
+
+Hardware has the CBS (Credit Based Shaper) which affects only Q3
+and Q2. When updating the CBS settings, even if the driver does so
+after waiting for Tx DMA finished, there is a possibility that frame
+data still remains in TxFIFO.
+
+To avoid this, decrease TxFIFO depth of Q3 and Q2 to one.
+
+This patch has been exercised this using netperf TCP_MAERTS, TCP_STREAM
+and UDP_STREAM tests run on an Ebisu board. No performance change was
+detected, outside of noise in the tests, both in terms of throughput and
+CPU utilisation.
+
+Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
+Signed-off-by: Masaru Nagai <masaru.nagai.vx@renesas.com>
+Signed-off-by: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>
+[simon: updated changelog]
+Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index d28c8f9ca55b..8154b38c08f7 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -458,7 +458,7 @@ static int ravb_dmac_init(struct net_device *ndev)
+ RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
+
+ /* Set FIFO size */
+- ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
++ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
+
+ /* Timestamp enable */
+ ravb_write(ndev, TCCR_TFEN, TCCR);
+--
+2.21.0
+
diff --git a/patches.suse/route-set-the-deleted-fnhe-fnhe_daddr-to-0-in-ip_del.patch b/patches.suse/route-set-the-deleted-fnhe-fnhe_daddr-to-0-in-ip_del.patch
new file mode 100644
index 0000000000..0077eeea78
--- /dev/null
+++ b/patches.suse/route-set-the-deleted-fnhe-fnhe_daddr-to-0-in-ip_del.patch
@@ -0,0 +1,59 @@
+From: Xin Long <lucien.xin@gmail.com>
+Date: Fri, 8 Mar 2019 14:50:54 +0800
+Subject: route: set the deleted fnhe fnhe_daddr to 0 in ip_del_fnhe to fix a
+ race
+Git-commit: ee60ad219f5c7c4fb2f047f88037770063ef785f
+Patch-mainline: v5.1-rc1
+References: networking-stable-19_03_15
+
+The race occurs in __mkroute_output() when 2 threads lookup a dst:
+
+ CPU A CPU B
+ find_exception()
+ find_exception() [fnhe expires]
+ ip_del_fnhe() [fnhe is deleted]
+ rt_bind_exception()
+
+In rt_bind_exception() it will bind a deleted fnhe with the new dst, and
+this dst will get no chance to be freed. It causes a dev defcnt leak and
+consecutive dmesg warnings:
+
+ unregister_netdevice: waiting for ethX to become free. Usage count = 1
+
+Especially thanks Jon to identify the issue.
+
+This patch fixes it by setting fnhe_daddr to 0 in ip_del_fnhe() to stop
+binding the deleted fnhe with a new dst when checking fnhe's fnhe_daddr
+and daddr in rt_bind_exception().
+
+It works as both ip_del_fnhe() and rt_bind_exception() are protected by
+fnhe_lock and the fhne is freed by kfree_rcu().
+
+Fixes: deed49df7390 ("route: check and remove route cache when we get route")
+Signed-off-by: Jon Maxwell <jmaxwell37@gmail.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Reviewed-by: David Ahern <dsahern@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv4/route.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 8ca3642f0d9b..a5da63e5faa2 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1303,6 +1303,10 @@ static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+ if (fnhe->fnhe_daddr == daddr) {
+ rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+ fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
++ /* set fnhe_daddr to 0 to ensure it won't bind with
++ * new dsts in rt_bind_exception().
++ */
++ fnhe->fnhe_daddr = 0;
+ fnhe_flush_routes(fnhe);
+ kfree_rcu(fnhe, rcu);
+ break;
+--
+2.21.0
+
diff --git a/patches.suse/rxrpc-Fix-client-call-queueing-waiting-for-channel.patch b/patches.suse/rxrpc-Fix-client-call-queueing-waiting-for-channel.patch
new file mode 100644
index 0000000000..5ee53b3d9c
--- /dev/null
+++ b/patches.suse/rxrpc-Fix-client-call-queueing-waiting-for-channel.patch
@@ -0,0 +1,47 @@
+From: David Howells <dhowells@redhat.com>
+Date: Sat, 9 Mar 2019 00:29:58 +0000
+Subject: rxrpc: Fix client call queueing, waiting for channel
+Git-commit: 69ffaebb90369ce08657b5aea4896777b9d6e8fc
+Patch-mainline: v5.1-rc1
+References: networking-stable-19_03_15
+
+rxrpc_get_client_conn() adds a new call to the front of the waiting_calls
+queue if the connection it's going to use already exists. This is bad as
+it allows calls to get starved out.
+
+Fix this by adding to the tail instead.
+
+Also change the other enqueue point in the same function to put it on the
+front (ie. when we have a new connection). This makes the point that in
+the case of a new connection the new call goes at the front (though it
+doesn't actually matter since the queue should be unoccupied).
+
+Fixes: 45025bceef17 ("rxrpc: Improve management and caching of client connection objects")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/rxrpc/conn_client.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/rxrpc/conn_client.c
++++ b/net/rxrpc/conn_client.c
+@@ -349,7 +349,7 @@ static int rxrpc_get_client_conn(struct
+ * normally have to take channel_lock but we do this before anyone else
+ * can see the connection.
+ */
+- list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
++ list_add(&call->chan_wait_link, &candidate->waiting_calls);
+
+ if (cp->exclusive) {
+ call->conn = candidate;
+@@ -424,7 +424,7 @@ found_extant_conn:
+ spin_lock(&conn->channel_lock);
+ call->conn = conn;
+ call->security_ix = conn->security_ix;
+- list_add(&call->chan_wait_link, &conn->waiting_calls);
++ list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
+ spin_unlock(&conn->channel_lock);
+ _leave(" = 0 [extant %d]", conn->debug_id);
+ return 0;
diff --git a/patches.suse/sctp-call-gso_reset_checksum-when-computing-checksum.patch b/patches.suse/sctp-call-gso_reset_checksum-when-computing-checksum.patch
new file mode 100644
index 0000000000..17faf33042
--- /dev/null
+++ b/patches.suse/sctp-call-gso_reset_checksum-when-computing-checksum.patch
@@ -0,0 +1,73 @@
+From: Xin Long <lucien.xin@gmail.com>
+Date: Tue, 12 Feb 2019 18:47:30 +0800
+Subject: sctp: call gso_reset_checksum when computing checksum in
+ sctp_gso_segment
+Git-commit: fc228abc2347e106a44c0e9b29ab70b712c4ca51
+Patch-mainline: v5.0-rc7
+References: networking-stable-19_02_24
+
+Jianlin reported a panic when running sctp gso over gre over vlan device:
+
+ [ 84.772930] RIP: 0010:do_csum+0x6d/0x170
+ [ 84.790605] Call Trace:
+ [ 84.791054] csum_partial+0xd/0x20
+ [ 84.791657] gre_gso_segment+0x2c3/0x390
+ [ 84.792364] inet_gso_segment+0x161/0x3e0
+ [ 84.793071] skb_mac_gso_segment+0xb8/0x120
+ [ 84.793846] __skb_gso_segment+0x7e/0x180
+ [ 84.794581] validate_xmit_skb+0x141/0x2e0
+ [ 84.795297] __dev_queue_xmit+0x258/0x8f0
+ [ 84.795949] ? eth_header+0x26/0xc0
+ [ 84.796581] ip_finish_output2+0x196/0x430
+ [ 84.797295] ? skb_gso_validate_network_len+0x11/0x80
+ [ 84.798183] ? ip_finish_output+0x169/0x270
+ [ 84.798875] ip_output+0x6c/0xe0
+ [ 84.799413] ? ip_append_data.part.50+0xc0/0xc0
+ [ 84.800145] iptunnel_xmit+0x144/0x1c0
+ [ 84.800814] ip_tunnel_xmit+0x62d/0x930 [ip_tunnel]
+ [ 84.801699] gre_tap_xmit+0xac/0xf0 [ip_gre]
+ [ 84.802395] dev_hard_start_xmit+0xa5/0x210
+ [ 84.803086] sch_direct_xmit+0x14f/0x340
+ [ 84.803733] __dev_queue_xmit+0x799/0x8f0
+ [ 84.804472] ip_finish_output2+0x2e0/0x430
+ [ 84.805255] ? skb_gso_validate_network_len+0x11/0x80
+ [ 84.806154] ip_output+0x6c/0xe0
+ [ 84.806721] ? ip_append_data.part.50+0xc0/0xc0
+ [ 84.807516] sctp_packet_transmit+0x716/0xa10 [sctp]
+ [ 84.808337] sctp_outq_flush+0xd7/0x880 [sctp]
+
+It was caused by SKB_GSO_CB(skb)->csum_start not set in sctp_gso_segment.
+sctp_gso_segment() calls skb_segment() with 'feature | NETIF_F_HW_CSUM',
+which causes SKB_GSO_CB(skb)->csum_start not to be set in skb_segment().
+
+For TCP/UDP, when feature supports HW_CSUM, CHECKSUM_PARTIAL will be set
+and gso_reset_checksum will be called to set SKB_GSO_CB(skb)->csum_start.
+
+So SCTP should do the same as TCP/UDP, to call gso_reset_checksum() when
+computing checksum in sctp_gso_segment.
+
+Reported-by: Jianlin Shi <jishi@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/sctp/offload.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/sctp/offload.c b/net/sctp/offload.c
+index 123e9f2dc226..edfcf16e704c 100644
+--- a/net/sctp/offload.c
++++ b/net/sctp/offload.c
+@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
+ {
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_not_inet = 0;
++ gso_reset_checksum(skb, ~0);
+ return sctp_compute_cksum(skb, skb_transport_offset(skb));
+ }
+
+--
+2.21.0
+
diff --git a/patches.suse/sit-check-if-IPv6-enabled-before-calling-ip6_err_gen.patch b/patches.suse/sit-check-if-IPv6-enabled-before-calling-ip6_err_gen.patch
new file mode 100644
index 0000000000..653a6cbbea
--- /dev/null
+++ b/patches.suse/sit-check-if-IPv6-enabled-before-calling-ip6_err_gen.patch
@@ -0,0 +1,50 @@
+From: Hangbin Liu <liuhangbin@gmail.com>
+Date: Thu, 7 Feb 2019 18:36:11 +0800
+Subject: sit: check if IPv6 enabled before calling
+ ip6_err_gen_icmpv6_unreach()
+Git-commit: 173656accaf583698bac3f9e269884ba60d51ef4
+Patch-mainline: v5.0-rc6
+References: networking-stable-19_02_24
+
+If we disabled IPv6 from the kernel command line (ipv6.disable=1), we should
+not call ip6_err_gen_icmpv6_unreach(). This:
+
+ ip link add sit1 type sit local 192.0.2.1 remote 192.0.2.2 ttl 1
+ ip link set sit1 up
+ ip addr add 198.51.100.1/24 dev sit1
+ ping 198.51.100.2
+
+if IPv6 is disabled at boot time, will crash the kernel.
+
+v2: there's no need to use in6_dev_get(), use __in6_dev_get() instead,
+ as we only need to check that idev exists and we are under
+ rcu_read_lock() (from netif_receive_skb_internal()).
+
+Reported-by: Jianlin Shi <jishi@redhat.com>
+Fixes: ca15a078bd90 ("sit: generate icmpv6 error when receiving icmpv4 error")
+Cc: Oussama Ghorbel <ghorbel@pivasoftware.com>
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv6/sit.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 1e03305c0549..e8a1dabef803 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
+ }
+
+ err = 0;
+- if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
++ if (__in6_dev_get(skb->dev) &&
++ !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
+ goto out;
+
+ if (t->parms.iph.daddr == 0)
+--
+2.21.0
+
diff --git a/patches.suse/tcp-tcp_v4_err-should-be-more-careful.patch b/patches.suse/tcp-tcp_v4_err-should-be-more-careful.patch
new file mode 100644
index 0000000000..dcca9e6aff
--- /dev/null
+++ b/patches.suse/tcp-tcp_v4_err-should-be-more-careful.patch
@@ -0,0 +1,44 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 15 Feb 2019 13:36:21 -0800
+Subject: tcp: tcp_v4_err() should be more careful
+Git-commit: 2c4cc9712364c051b1de2d175d5fbea6be948ebf
+Patch-mainline: v5.0-rc8
+References: networking-stable-19_02_20
+
+ICMP handlers are not very often stressed, we should
+make them more resilient to bugs that might surface in
+the future.
+
+If there is no packet in retransmit queue, we should
+avoid a NULL deref.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: soukjin bae <soukjin.bae@samsung.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv4/tcp_ipv4.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -474,13 +474,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb
+ if (sock_owned_by_user(sk))
+ break;
+
++ skb = tcp_write_queue_head(sk);
++ if (WARN_ON_ONCE(!skb))
++ break;
++
+ icsk->icsk_backoff--;
+ icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
+ TCP_TIMEOUT_INIT;
+ icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+
+- skb = tcp_write_queue_head(sk);
+- BUG_ON(!skb);
+
+ tcp_mstamp_refresh(tp);
+ delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
diff --git a/patches.suse/tipc-fix-race-condition-causing-hung-sendto.patch b/patches.suse/tipc-fix-race-condition-causing-hung-sendto.patch
new file mode 100644
index 0000000000..cf0ddc3adc
--- /dev/null
+++ b/patches.suse/tipc-fix-race-condition-causing-hung-sendto.patch
@@ -0,0 +1,83 @@
+From: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+Date: Mon, 25 Feb 2019 10:57:20 +0700
+Subject: tipc: fix race condition causing hung sendto
+Git-commit: bfd07f3dd4f111b884d7922b37eb239280f83d8c
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+When sending multicast messages via blocking socket,
+if sending link is congested (tsk->cong_link_cnt is set to 1),
+the sending thread will be put into sleeping state. However,
+tipc_sk_filter_rcv() is called under socket spin lock but
+tipc_wait_for_cond() is not. So, there is no guarantee that
+the setting of tsk->cong_link_cnt to 0 in tipc_sk_proto_rcv() in
+CPU-1 will be perceived by CPU-0. If that is the case, the sending
+thread in CPU-0 after being waken up, will continue to see
+tsk->cong_link_cnt as 1 and put the sending thread into sleeping
+state again. The sending thread will sleep forever.
+
+CPU-0 | CPU-1
+tipc_wait_for_cond() |
+{ |
+ // condition_ = !tsk->cong_link_cnt |
+ while ((rc_ = !(condition_))) { |
+ ... |
+ release_sock(sk_); |
+ wait_woken(); |
+ | if (!sock_owned_by_user(sk))
+ | tipc_sk_filter_rcv()
+ | {
+ | ...
+ | tipc_sk_proto_rcv()
+ | {
+ | ...
+ | tsk->cong_link_cnt--;
+ | ...
+ | sk->sk_write_space(sk);
+ | ...
+ | }
+ | ...
+ | }
+ sched_annotate_sleep(); |
+ lock_sock(sk_); |
+ remove_wait_queue(); |
+ } |
+} |
+
+This commit fixes it by adding memory barrier to tipc_sk_proto_rcv()
+and tipc_wait_for_cond().
+
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/tipc/socket.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -364,11 +364,13 @@ static int tipc_sk_sock_err(struct socke
+
+ #define tipc_wait_for_cond(sock_, timeo_, condition_) \
+ ({ \
++ DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
+ struct sock *sk_; \
+ int rc_; \
+ \
+ while ((rc_ = !(condition_))) { \
+- DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
++ /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
++ smp_rmb(); \
+ sk_ = (sock_)->sk; \
+ rc_ = tipc_sk_sock_err((sock_), timeo_); \
+ if (rc_) \
+@@ -1692,6 +1694,8 @@ static bool filter_rcv(struct sock *sk,
+ onode = msg_orignode(hdr);
+ kfree_skb(skb);
+ u32_del(&tsk->cong_links, onode);
++ /* coupled with smp_rmb() in tipc_wait_for_cond() */
++ smp_wmb();
+ tsk->cong_link_cnt--;
+ sk->sk_write_space(sk);
+ return false;
diff --git a/patches.suse/tun-fix-blocking-read.patch b/patches.suse/tun-fix-blocking-read.patch
new file mode 100644
index 0000000000..b028a60b5a
--- /dev/null
+++ b/patches.suse/tun-fix-blocking-read.patch
@@ -0,0 +1,40 @@
+From: Timur Celik <mail@timurcelik.de>
+Date: Sat, 23 Feb 2019 12:53:13 +0100
+Subject: tun: fix blocking read
+Git-commit: 71828b2240692cec0e68b8d867bc00e1745e7fae
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+This patch moves setting of the current state into the loop. Otherwise
+the task may end up in a busy wait loop if none of the break conditions
+are met.
+
+Signed-off-by: Timur Celik <mail@timurcelik.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/tun.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1498,9 +1498,9 @@ static struct sk_buff *tun_ring_recv(str
+ }
+
+ add_wait_queue(&tfile->wq.wait, &wait);
+- current->state = TASK_INTERRUPTIBLE;
+
+ while (1) {
++ set_current_state(TASK_INTERRUPTIBLE);
+ skb = skb_array_consume(&tfile->tx_array);
+ if (skb)
+ break;
+@@ -1516,7 +1516,7 @@ static struct sk_buff *tun_ring_recv(str
+ schedule();
+ }
+
+- current->state = TASK_RUNNING;
++ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&tfile->wq.wait, &wait);
+
+ out:
diff --git a/patches.suse/tun-remove-unnecessary-memory-barrier.patch b/patches.suse/tun-remove-unnecessary-memory-barrier.patch
new file mode 100644
index 0000000000..bc142e7685
--- /dev/null
+++ b/patches.suse/tun-remove-unnecessary-memory-barrier.patch
@@ -0,0 +1,29 @@
+From: Timur Celik <mail@timurcelik.de>
+Date: Mon, 25 Feb 2019 21:13:13 +0100
+Subject: tun: remove unnecessary memory barrier
+Git-commit: ecef67cb10db7b83b3b71c61dbb29aa070ab0112
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+Replace set_current_state with __set_current_state since no memory
+barrier is needed at this point.
+
+Signed-off-by: Timur Celik <mail@timurcelik.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/tun.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1516,7 +1516,7 @@ static struct sk_buff *tun_ring_recv(str
+ schedule();
+ }
+
+- set_current_state(TASK_RUNNING);
++ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&tfile->wq.wait, &wait);
+
+ out:
diff --git a/patches.suse/vxlan-test-dev-flags-IFF_UP-before-calling-netif_rx.patch b/patches.suse/vxlan-test-dev-flags-IFF_UP-before-calling-netif_rx.patch
new file mode 100644
index 0000000000..f6b6c1d76c
--- /dev/null
+++ b/patches.suse/vxlan-test-dev-flags-IFF_UP-before-calling-netif_rx.patch
@@ -0,0 +1,80 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 7 Feb 2019 12:27:38 -0800
+Subject: vxlan: test dev->flags & IFF_UP before calling netif_rx()
+Git-commit: 4179cb5a4c924cd233eaadd081882425bc98f44e
+Patch-mainline: v5.0-rc7
+References: networking-stable-19_02_20
+
+netif_rx() must be called under a strict contract.
+
+At device dismantle phase, core networking clears IFF_UP
+and flush_all_backlogs() is called after rcu grace period
+to make sure no incoming packet might be in a cpu backlog
+and still referencing the device.
+
+Most drivers call netif_rx() from their interrupt handler,
+and since the interrupts are disabled at device dismantle,
+netif_rx() does not have to check dev->flags & IFF_UP
+
+Virtual drivers do not have this guarantee, and must
+therefore make the check themselves.
+
+Otherwise we risk use-after-free and/or crashes.
+
+Note this patch also fixes a small issue that came
+with commit ce6502a8f957 ("vxlan: fix a use after free
+in vxlan_encap_bypass"), since the dev->stats.rx_dropped
+change was done on the wrong device.
+
+Fixes: d342894c5d2f ("vxlan: virtual extensible lan")
+Fixes: ce6502a8f957 ("vxlan: fix a use after free in vxlan_encap_bypass")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Petr Machata <petrm@mellanox.com>
+Cc: Ido Schimmel <idosch@mellanox.com>
+Cc: Roopa Prabhu <roopa@cumulusnetworks.com>
+Cc: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/vxlan.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2000,7 +2000,7 @@ static void vxlan_encap_bypass(struct sk
+ struct pcpu_sw_netstats *tx_stats, *rx_stats;
+ union vxlan_addr loopback;
+ union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+- struct net_device *dev = skb->dev;
++ struct net_device *dev;
+ int len = skb->len;
+
+ tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+@@ -2020,8 +2020,15 @@ static void vxlan_encap_bypass(struct sk
+ #endif
+ }
+
++ rcu_read_lock();
++ dev = skb->dev;
++ if (unlikely(!(dev->flags & IFF_UP))) {
++ kfree_skb(skb);
++ goto drop;
++ }
++
+ if (dst_vxlan->flags & VXLAN_F_LEARN)
+- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, vni);
++ vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, vni);
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->tx_packets++;
+@@ -2034,8 +2041,10 @@ static void vxlan_encap_bypass(struct sk
+ rx_stats->rx_bytes += len;
+ u64_stats_update_end(&rx_stats->syncp);
+ } else {
++drop:
+ dev->stats.rx_dropped++;
+ }
++ rcu_read_unlock();
+ }
+
+ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
diff --git a/patches.suse/xen-netback-don-t-populate-the-hash-cache-on-XenBus-.patch b/patches.suse/xen-netback-don-t-populate-the-hash-cache-on-XenBus-.patch
new file mode 100644
index 0000000000..a6ec5e1d76
--- /dev/null
+++ b/patches.suse/xen-netback-don-t-populate-the-hash-cache-on-XenBus-.patch
@@ -0,0 +1,53 @@
+From: Igor Druzhinin <igor.druzhinin@citrix.com>
+Date: Thu, 28 Feb 2019 14:11:26 +0000
+Subject: xen-netback: don't populate the hash cache on XenBus disconnect
+Git-commit: a2288d4e355992d369c50c45d017a85f6061ff71
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+Occasionally, during the disconnection procedure on XenBus which
+includes hash cache deinitialization there might be some packets
+still in-flight on other processors. Handling of these packets includes
+hashing and hash cache population that finally results in hash cache
+data structure corruption.
+
+In order to avoid this we prevent hashing of those packets if there
+are no queues initialized. In that case RCU protection of queues guards
+the hash cache as well.
+
+Signed-off-by: Igor Druzhinin <igor.druzhinin@citrix.com>
+Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/xen-netback/hash.c | 2 ++
+ drivers/net/xen-netback/interface.c | 7 +++++++
+ 2 files changed, 9 insertions(+)
+
+--- a/drivers/net/xen-netback/hash.c
++++ b/drivers/net/xen-netback/hash.c
+@@ -435,6 +435,8 @@ void xenvif_init_hash(struct xenvif *vif
+ if (xenvif_hash_cache_size == 0)
+ return;
+
++ BUG_ON(vif->hash.cache.count);
++
+ spin_lock_init(&vif->hash.cache.lock);
+ INIT_LIST_HEAD(&vif->hash.cache.list);
+ }
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -153,6 +153,13 @@ static u16 xenvif_select_queue(struct ne
+ {
+ struct xenvif *vif = netdev_priv(dev);
+ unsigned int size = vif->hash.size;
++ unsigned int num_queues;
++
++ /* If queues are not set up internally - always return 0
++ * as the packet going to be dropped anyway */
++ num_queues = READ_ONCE(vif->num_queues);
++ if (num_queues < 1)
++ return 0;
+
+ if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+ return fallback(dev, skb) % dev->real_num_tx_queues;
diff --git a/patches.suse/xen-netback-fix-occasional-leak-of-grant-ref-mapping.patch b/patches.suse/xen-netback-fix-occasional-leak-of-grant-ref-mapping.patch
new file mode 100644
index 0000000000..3b6d29cf9c
--- /dev/null
+++ b/patches.suse/xen-netback-fix-occasional-leak-of-grant-ref-mapping.patch
@@ -0,0 +1,63 @@
+From: Igor Druzhinin <igor.druzhinin@citrix.com>
+Date: Thu, 28 Feb 2019 12:48:03 +0000
+Subject: xen-netback: fix occasional leak of grant ref mappings under memory
+ pressure
+Git-commit: 99e87f56b48f490fb16b6e0f74691c1e664dea95
+Patch-mainline: v5.0
+References: networking-stable-19_03_07
+
+Zero-copy callback flag is not yet set on frag list skb at the moment
+xenvif_handle_frag_list() returns -ENOMEM. This eventually results in
+leaking grant ref mappings since xenvif_zerocopy_callback() is never
+called for these fragments. Those eventually build up and cause Xen
+to kill Dom0 as the slots get reused for new mappings:
+
+"d0v0 Attempt to implicitly unmap a granted PTE c010000329fce005"
+
+That behavior is observed under certain workloads where sudden spikes
+of page cache writes coexist with active atomic skb allocations from
+network traffic. Additionally, rework the logic to deal with frag_list
+deallocation in a single place.
+
+Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
+Signed-off-by: Igor Druzhinin <igor.druzhinin@citrix.com>
+Acked-by: Wei Liu <wei.liu2@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/xen-netback/netback.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1074,11 +1074,6 @@ static int xenvif_handle_frag_list(struc
+ skb_frag_size_set(&frags[i], len);
+ }
+
+- /* Copied all the bits from the frag list -- free it. */
+- skb_frag_list_init(skb);
+- xenvif_skb_zerocopy_prepare(queue, nskb);
+- kfree_skb(nskb);
+-
+ /* Release all the original (foreign) frags. */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_frag_unref(skb, f);
+@@ -1147,6 +1142,8 @@ static int xenvif_tx_submit(struct xenvi
+ xenvif_fill_frags(queue, skb);
+
+ if (unlikely(skb_has_frag_list(skb))) {
++ struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
++ xenvif_skb_zerocopy_prepare(queue, nskb);
+ if (xenvif_handle_frag_list(queue, skb)) {
+ if (net_ratelimit())
+ netdev_err(queue->vif->dev,
+@@ -1155,6 +1152,9 @@ static int xenvif_tx_submit(struct xenvi
+ kfree_skb(skb);
+ continue;
+ }
++ /* Copied all the bits from the frag list -- free it. */
++ skb_frag_list_init(skb);
++ kfree_skb(nskb);
+ }
+
+ skb->dev = queue->vif->dev;
diff --git a/series.conf b/series.conf
index 5b56144e64..572f4af66b 100644
--- a/series.conf
+++ b/series.conf
@@ -3507,6 +3507,7 @@
patches.fixes/xfs-remove-bli-from-AIL-before-release-on-transactio.patch
patches.fixes/xfs-remove-double-underscore-integer-types.patch
patches.fixes/xfs-check-if-an-inode-is-cached-and-allocated.patch
+ patches.fixes/xfs-reflink-find-shared-should-take-a-transaction.patch
patches.fixes/xfs-rewrite-xfs_dq_get_next_id-using-xfs_iext_lookup.patch
patches.fixes/vfs-Add-page_cache_seek_hole_data-helper.patch
patches.fixes/vfs-Add-iomap_seek_hole-and-iomap_seek_data-helpers.patch
@@ -11044,6 +11045,7 @@
patches.fixes/crypto-skcipher-set-walk.iv-for-zero-length-inputs
patches.drivers/crypto-mcryptd-protect-the-per-CPU-queue-with-a-lock
patches.fixes/0009-xfs-move-xfs_iext_insert-tracepoint-to-report-useful-information.patch
+ patches.fixes/xfs-remove-dest-file-s-post-eof-preallocations-befor.patch
patches.fixes/xfs-allow-CoW-remap-transactions-to-use-reserve-bloc.patch
patches.fixes/xen-eliminate-wx-mappings.patch
patches.fixes/xen-balloon-mark-unallocated-host-memory-as-unusable.patch
@@ -11729,6 +11731,7 @@
patches.drivers/irqchip-gic-v3-Fix-the-driver-probe-fail-due-to-disa
patches.drivers/clocksource-drivers-stm32-Fix-kernel-panic-with-mult
patches.suse/hrtimer-Ensure-POSIX-compliance-relative-CLOCK_REALTIME-hrtimers.patch
+ patches.fixes/tracing-hrtimer-Fix-tracing-bugs-by-taking-all-clock.patch
patches.arch/x86-intel_rdt-enumerate-l2-code-and-data-prioritization-cdp-feature.patch
patches.arch/x86-intel_rdt-add-two-new-resources-for-l2-code-and-data-prioritization-cdp.patch
patches.arch/x86-intel_rdt-enable-l2-cdp-in-msr-ia32_l2_qos_cfg.patch
@@ -11848,7 +11851,11 @@
patches.fixes/fix-misannotated-out-of-line-copy_to_user
patches.fixes/0006-jffs2-Fix-use-after-free-bug-in-jffs2_iget-s-error-h.patch
patches.fixes/0004-iomap-report-collisions-between-directio-and-buffere.patch
+ patches.fixes/xfs-call-xfs_qm_dqattach-before-performing-reflink-o.patch
patches.fixes/xfs-preserve-i_rdev-when-recycling-a-reclaimable-inode.patch
+ patches.fixes/xfs-reflink-should-break-pnfs-leases-before-sharing-.patch
+ patches.fixes/xfs-allow-xfs_lock_two_inodes-to-take-different-EXCL.patch
+ patches.fixes/xfs-only-grab-shared-inode-locks-for-source-file-dur.patch
patches.fixes/iomap-warn-on-zero-length-mappings.patch
patches.suse/0113-md-raid1-raid10-silence-warning-about-wait-within-wa.patch
patches.suse/0114-md-r5cache-print-more-info-of-log-recovery.patch
@@ -15513,6 +15520,7 @@
patches.fixes/fs-dax-use-page-mapping-to-warn-if-truncate-collides.patch
patches.fixes/dax-introduce-CONFIG_DAX_DRIVER.patch
patches.fixes/dax-dm-allow-device-mapper-to-operate-without-dax-su.patch
+ patches.fixes/ring-buffer-Check-if-memory-is-available-before-allo.patch
patches.drivers/dmaengine-at_xdmac-fix-rare-residue-corruption
patches.drivers/dmaengine-pl330-fix-a-race-condition-in-case-of-thre
patches.drivers/dmaengine-qcom-bam_dma-get-num-channels-and-num-ees-
@@ -15942,6 +15950,7 @@
patches.drivers/0004-cpufreq-CPPC-Set-platform-specific-transition_delay_.patch
patches.suse/0001-btrfs-Fix-wrong-first_key-parameter-in-replace_path.patch
patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch
+ patches.fixes/xfs-cap-the-length-of-deduplication-requests.patch
patches.fixes/blk-mq-count-allocated-but-not-started-requests-in-i.patch
patches.fixes/blk-mq-fix-sysfs-inflight-counter.patch
patches.suse/0221-bcache-store-disk-name-in-struct-cache-and-struct-ca.patch
@@ -16410,6 +16419,7 @@
patches.drivers/nvme-use-the-changed-namespaces-list-log-to-clear-ns.patch
patches.fixes/restore-cond_resched-in-shrink_dcache_parent.patch
patches.fixes/rmdir-rename-do-shrink_dcache_parent-only-on-success.patch
+ patches.fixes/fs-avoid-fdput-after-failed-fdget-in-vfs_dedupe_file.patch
patches.drivers/hwmon-ltc2990-Fix-incorrect-conversion-of-negative-t
patches.drivers/hwmon-asus_atk0110-Replace-deprecated-device-registe
patches.drivers/spi-bcm63xx-hspi-Enable-the-clock-before-calling-clk
@@ -17938,6 +17948,7 @@
patches.fixes/x86-entry-64-remove-ebx-handling-from-error_entry-exit.patch
patches.arch/x86-boot-fix-if_changed-build-flip-flop-bug
patches.fixes/squashfs-more-metadata-hardening.patch
+ patches.fixes/0001-cxgb4-Added-missing-break-in-ndo_udp_tunnel_-add-del.patch
patches.suse/net-fix-amd-xgbe-flow-control-issue.patch
patches.suse/net-ena-Fix-use-of-uninitialized-DMA-address-bits-fi.patch
patches.fixes/vti6-fix-PMTU-caching-and-reporting-on-xmit.patch
@@ -18268,8 +18279,11 @@
patches.fixes/kconfig-fix-the-rule-of-mainmenu_stmt-symbol.patch
patches.arch/x86-i8259-add-missing-include-file
patches.drivers/net-usb-r8152-use-irqsave-in-USB-s-complete-callback.patch
+ patches.fixes/0001-cxgb4-Add-new-T5-PCI-device-id-0x50ae.patch
patches.drivers/net-hns3-rename-the-interface-for-init_client_instan.patch
patches.drivers/net-hns3-add-unlikely-for-error-check.patch
+ patches.fixes/0001-cxgb4-Add-support-for-FW_ETH_TX_PKT_VM_WR.patch
+ patches.fixes/0001-cxgb4-Support-ethtool-private-flags.patch
patches.drivers/net-smc-determine-port-attributes-independent-from-pnet-table.patch
patches.drivers/net-smc-add-pnetid-support.patch
patches.drivers/net-smc-optimize-consumer-cursor-updates.patch
@@ -18302,6 +18316,7 @@
patches.drivers/net-hns3-Fix-for-reset_level-default-assignment-prob.patch
patches.drivers/net-hns3-Fix-for-using-wrong-mask-and-shift-in-hclge.patch
patches.drivers/net-hns3-Fix-comments-for-hclge_get_ring_chain_from_.patch
+ patches.fixes/0001-cxgb4-do-not-return-DUPLEX_UNKNOWN-when-link-is-down.patch
patches.drivers/liquidio-fix-hang-when-re-binding-VF-host-drv-after-.patch
patches.drivers/qed-Add-qed-APIs-for-PHY-module-query.patch
patches.drivers/qede-Add-driver-callbacks-for-eeprom-module-query.patch
@@ -18743,6 +18758,7 @@
patches.drivers/power-generic-adc-battery-fix-out-of-bounds-write-wh
patches.drivers/power-generic-adc-battery-check-for-duplicate-proper
patches.drivers/power-supply-max77693_charger-fix-unintentional-fall
+ patches.fixes/vfs-limit-size-of-dedupe.patch
patches.fixes/vfs-fix-freeze-protection-in-mnt_want_write_file-for.patch
patches.fixes/fuse-fix-double-request_end.patch
patches.fixes/fuse-fix-unlocked-access-to-processing-queue.patch
@@ -19316,6 +19332,11 @@
patches.fixes/0001-dm-cache-destroy-migration_cache-if-cache-target-reg.patch
patches.fixes/0001-dm-fix-report-zone-remapping-to-account-for-partitio.patch
patches.fixes/0001-dm-linear-eliminate-linear_end_io-call-if-CONFIG_DM_.patch
+ patches.fixes/xfs-refactor-clonerange-preparation-into-a-separate-.patch
+ patches.fixes/xfs-zero-posteof-blocks-when-cloning-above-eof.patch
+ patches.fixes/xfs-update-ctime-and-remove-suid-before-cloning-file.patch
+ patches.fixes/xfs-fix-data-corruption-w-unaligned-dedupe-ranges.patch
+ patches.fixes/xfs-fix-data-corruption-w-unaligned-reflink-ranges.patch
patches.fixes/0001-dm-linear-fix-linear_end_io-conditional-definition.patch
patches.fixes/kbuild-allow-to-use-gcc-toolchain-not-in-clang-search-path.patch
patches.fixes/udp-Unbreak-modules-that-rely-on-external-__skb_recv.patch
@@ -19367,6 +19388,7 @@
patches.drivers/Input-elan_i2c-add-ACPI-ID-for-Lenovo-IdeaPad-330-15.patch
patches.drm/0001-drm-sun4i-Fix-an-ulong-overflow-in-the-dotclock-driv.patch
patches.fixes/perf-tools-fix-tracing_path_mount-proper-path.patch
+ patches.suse/0001-x86-tsc-Force-inlining-of-cyc2ns-bits.patch
patches.arch/x86-percpu-fix-this_cpu_read
patches.arch/x86-time-correct-the-attribute-on-jiffies-definition
patches.arch/x86-fpu-remove-second-definition-of-fpu-in-_fpu__restore_sig
@@ -19428,6 +19450,7 @@
patches.drivers/net-hns3-Fix-for-loopback-selftest-failed-problem.patch
patches.drivers/net-hns3-Fix-ping-exited-problem-when-doing-lp-selft.patch
patches.drivers/net-hns3-Preserve-vlan-0-in-hardware-table.patch
+ patches.fixes/0001-Collect-descriptors-of-all-ULD-and-LLD-hardware-queu.patch
patches.fixes/0001-cxgb4-add-per-rx-queue-counter-for-packet-errors.patch
patches.arch/s390-qeth-invoke-softirqs-after-napi_schedule
patches.drivers/net-ibm-fix-return-type-of-ndo_start_xmit-function.patch
@@ -19858,6 +19881,9 @@
patches.suse/net-stmmac-Fix-stmmac_mdio_reset-when-building-stmma.patch
patches.suse/openvswitch-Fix-push-pop-ethernet-validation.patch
patches.drivers/vhost-scsi-truncate-T10-PI-iov_iter-to-prot_bytes.patch
+ patches.fixes/vfs-exit-early-from-zero-length-remap-operations.patch
+ patches.fixes/xfs-fix-pagecache-truncation-prior-to-reflink.patch
+ patches.fixes/xfs-clean-up-xfs_reflink_remap_blocks-call-site.patch
patches.drm/drm-edid-Add-6-bpc-quirk-for-BOE-panel.patch
patches.drivers/ALSA-ca0106-Disable-IZD-on-SB0570-DAC-to-fix-audio-p.patch
patches.drivers/pwm-lpss-Release-runtime-pm-reference-from-the-drive.patch
@@ -20021,6 +20047,7 @@
patches.drivers/iommu-vt-d-use-memunmap-to-free-memremap
patches.fixes/nvme-fc-resolve-io-failures-during-connect.patch
patches.fixes/libceph-fall-back-to-sendmsg-for-slab-pages.patch
+ patches.fixes/xfs-flush-removing-page-cache-in-xfs_reflink_remap_p.patch
patches.suse/sctp-not-allow-to-set-asoc-prsctp_enable-by-sockopt.patch
patches.suse/sctp-not-increase-stream-s-incnt-before-sending-adds.patch
patches.drivers/net-ena-fix-crash-during-failed-resume-from-hibernat.patch
@@ -20917,9 +20944,11 @@
patches.drivers/platform-x86-Fix-unmet-dependency-warning-for-SAMSUN.patch
patches.fixes/CIFS-Do-not-count-ENODATA-as-failure-for-query-direc.patch
patches.suse/kernel-exit.c-release-ptraced-tasks-before-zap_pid_n.patch
+ patches.fixes/mm-migrate-Make-buffer_migrate_page_norefs-actually-.patch
patches.suse/mm-oom-fix-use-after-free-in-oom_kill_process.patch
patches.suse/mm-hwpoison-use-do_send_sig_info-instead-of-force_sig.patch
patches.suse/mm-migrate-don-t-rely-on-__PageMovable-of-newpage-after-unlocking-it.patch
+ patches.fixes/vfs-Avoid-softlockups-in-drop_pagecache_sb.patch
patches.fixes/md-raid5-fix-out-of-memory-during-raid-cache-recover.patch
patches.fixes/blk-mq-fix-a-hung-issue-when-fsync.patch
patches.suse/0002-Btrfs-fix-deadlock-when-allocating-tree-block-during.patch
@@ -20974,7 +21003,9 @@
patches.suse/net-dsa-slave-Don-t-propagate-flag-changes-on-down-s.patch
patches.fixes/mISDN-fix-a-race-in-dev_expire_timer.patch
patches.suse/rxrpc-bad-unlock-balance-in-rxrpc_recvmsg.patch
+ patches.suse/net-mlx5e-Don-t-overwrite-pedit-action-when-multiple.patch
patches.drivers/net-Don-t-default-Cavium-PTP-driver-to-y.patch
+ patches.suse/sit-check-if-IPv6-enabled-before-calling-ip6_err_gen.patch
patches.fixes/scsi-target-make-the-pi_prot_format-ConfigFS-path-re.patch
patches.drivers/soc-fsl-qbman-avoid-race-in-clearing-QMan-interrupt.patch
patches.fixes/ARM-iop32x-n2100-fix-PCI-IRQ-mapping.patch
@@ -20996,11 +21027,18 @@
patches.drivers/ALSA-usb-audio-Fix-implicit-fb-endpoint-setup-by-qui.patch
patches.drivers/ASoC-rsnd-fixup-rsnd_ssi_master_clk_start-user-count.patch
patches.fixes/vsock-cope-with-memory-allocation-failure-at-socket-.patch
+ patches.suse/vxlan-test-dev-flags-IFF_UP-before-calling-netif_rx.patch
+ patches.suse/net-fix-IPv6-prefix-route-residue.patch
patches.fixes/mac80211-Fix-Tx-aggregation-session-tear-down-with-I.patch
+ patches.suse/ipv6-propagate-genlmsg_reply-return-code.patch
patches.fixes/batman-adv-fix-uninit-value-in-batadv_interface_tx.patch
+ patches.suse/net-packet-fix-4gb-buffer-limit-due-to-overflow-chec.patch
patches.drivers/net-ena-fix-race-between-link-up-and-device-initaliz.patch
patches.drivers/net-ena-update-driver-version-from-2.0.2-to-2.0.3.patch
+ patches.suse/net_sched-fix-two-more-memory-leaks-in-cls_tcindex.patch
patches.fixes/team-avoid-complex-list-operations-in-team_nl_cmd_op.patch
+ patches.suse/sctp-call-gso_reset_checksum-when-computing-checksum.patch
+ patches.suse/dsa-mv88e6xxx-Ensure-all-pending-interrupts-are-hand.patch
patches.drm/drm-imx-ignore-plane-updates-on-disabled-crtcs.patch
patches.drm/0001-gpu-ipu-v3-Fix-i.MX51-CSI-control-registers-offset.patch
patches.drm/drm-imx-imx-ldb-add-missing-of_node_puts.patch
@@ -21028,9 +21066,14 @@
patches.fixes/mailbox-bcm-flexrm-mailbox-Fix-FlexRM-ring-flush-tim.patch
patches.fixes/mac80211-Free-mpath-object-when-rhashtable-insertion.patch
patches.fixes/mac80211-Restore-vif-beacon-interval-if-start-ap-fai.patch
+ patches.suse/net-Fix-for_each_netdev_feature-on-Big-endian.patch
+ patches.suse/net-validate-untrusted-gso-packets-without-csum-offl.patch
+ patches.suse/net-Add-header-for-usage-of-fls64.patch
patches.fixes/0001-mlxsw-__mlxsw_sp_port_headroom_set-Fix-a-use-of-loca.patch
patches.fixes/0001-net-stmmac-handle-endianness-in-dwmac4_get_timestamp.patch
patches.drivers/qmi_wwan-apply-SET_DTR-quirk-to-Sierra-WP7607.patch
+ patches.suse/tcp-tcp_v4_err-should-be-more-careful.patch
+ patches.suse/net-Do-not-allocate-page-fragments-that-are-not-skb-.patch
patches.fixes/net-crypto-set-sk-to-NULL-when-af_alg_release.patch
patches.fixes/0001-net-stmmac-Fix-a-race-in-EEE-enable-callback.patch
patches.fixes/vhost-correctly-check-the-return-value-of-translate_.patch
@@ -21046,24 +21089,49 @@
patches.drivers/clk-sunxi-ng-v3s-Fix-TCON-reset-de-assert-bit.patch
patches.drivers/clk-sunxi-A31-Fix-wrong-AHB-gate-number.patch
patches.suse/kallsyms-handle-too-long-symbols-in-kallsyms-c.patch
+ patches.fixes/KEYS-user-Align-the-payload-buffer.patch
+ patches.fixes/KEYS-always-initialize-keyring_index_key-desc_len.patch
patches.drivers/scsi-libiscsi-fix-race-between-iscsi_xmit_task-and-iscsi_complete_task
patches.fixes/scsi-core-reset-host-byte-in-DID_NEXUS_FAILURE-case.patch
+ patches.suse/missing-barriers-in-some-of-unix_sock-addr-and-path-.patch
+ patches.suse/net-avoid-false-positives-in-untrusted-gso-validatio.patch
+ patches.suse/ipvlan-disallow-userns-cap_net_admin-to-change-globa.patch
patches.fixes/mac80211_hwsim-propagate-genlmsg_reply-return-code.patch
patches.drivers/bnxt_en-Fix-typo-in-firmware-message-timeout-logic.patch
patches.drivers/bnxt_en-Wait-longer-for-the-firmware-message-respons.patch
+ patches.suse/net-Set-rtm_table-to-RT_TABLE_COMPAT-for-ipv6-for-ta.patch
patches.fixes/mdio_bus-Fix-use-after-free-on-device_register-fails.patch
patches.fixes/bpf-lpm-fix-lookup-bug-in-map_delete_elem.patch
patches.fixes/0001-nfp-bpf-fix-ALU32-high-bits-clearance-bug.patch
+ patches.suse/net-x25-fix-a-race-in-x25_bind.patch
patches.fixes/0001-mm-enforce-min-addr-even-if-capable-in-expand_downwa.patch
patches.drivers/mmc-spi-Fix-card-detection-during-probe.patch
+ patches.drivers/mmc-tmio_mmc_core-don-t-claim-spurious-interrupts.patch
patches.drm/drm-Block-fb-changes-for-async-plane-updates.patch
patches.drivers/iommu-dmar-fix-buffer-overflow-during-pci-bus-notification
+ patches.suse/tun-fix-blocking-read.patch
+ patches.suse/net-socket-set-sock-sk-to-NULL-after-calling-proto_o.patch
+ patches.suse/tun-remove-unnecessary-memory-barrier.patch
+ patches.suse/net-Add-__icmp_send-helper.patch
+ patches.suse/net-avoid-use-IPCB-in-cipso_v4_error.patch
+ patches.suse/ipv4-Return-error-for-RTA_VIA-attribute.patch
+ patches.suse/ipv6-Return-error-for-RTA_VIA-attribute.patch
+ patches.suse/mpls-Return-error-for-RTA_GATEWAY-attribute.patch
+ patches.suse/hv_netvsc-Fix-IP-header-checksum-for-coalesced-packe.patch
+ patches.suse/tipc-fix-race-condition-causing-hung-sendto.patch
+ patches.suse/bnxt_en-Drop-oversize-TX-packets-to-prevent-errors.patch
patches.fixes/0001-net-phy-Micrel-KSZ8061-link-failure-after-cable-conn.patch
+ patches.suse/net-nfc-Fix-NULL-dereference-on-nfc_llcp_build_tlv-f.patch
+ patches.suse/netlabel-fix-out-of-bounds-memory-accesses.patch
patches.suse/net-netem-fix-skb-length-BUG_ON-in-__skb_to_sgvec.patch
+ patches.suse/xen-netback-fix-occasional-leak-of-grant-ref-mapping.patch
+ patches.suse/xen-netback-don-t-populate-the-hash-cache-on-XenBus-.patch
+ patches.suse/net-dsa-mv88e6xxx-Fix-u64-statistics.patch
patches.fixes/bpf-decrease-usercnt-if-bpf_map_new_fd-fails-in-bpf_.patch
patches.fixes/bpf-drop-refcount-if-bpf_map_new_fd-fails-in-map_cre.patch
patches.fixes/bpf-fix-sanitation-rewrite-in-case-of-non-pointers.patch
patches.drivers/geneve-correctly-handle-ipv6.disable-module-paramete.patch
+ patches.suse/net-sit-fix-memory-leak-in-sit_init_net.patch
patches.arch/x86-uaccess-don-t-leak-the-ac-flag-into-_put_user-value-evaluation.patch
patches.fixes/copy_mount_string-Limit-string-length-to-PATH_MAX.patch
patches.fixes/exec-Fix-mem-leak-in-kernel_read_file.patch
@@ -21082,6 +21150,7 @@
patches.drivers/spi-ti-qspi-Fix-mmap-read-when-more-than-one-CS-in-u.patch
patches.drivers/spi-pxa2xx-Setup-maximum-supported-DMA-transfer-leng.patch
patches.drivers/leds-lp55xx-fix-null-deref-on-firmware-load-failure.patch
+ patches.fixes/0001-cxgb4-cxgb4vf-Link-management-changes.patch
patches.drivers/e1000e-fix-cyclic-resets-at-link-up-with-active-tx.patch
patches.drivers/wlcore-Fix-memory-leak-in-case-wl12xx_fetch_firmware.patch
patches.drivers/cw1200-fix-missing-unlock-on-error-in-cw1200_hw_scan.patch
@@ -21091,6 +21160,8 @@
patches.drivers/iwlwifi-mvm-fix-A-MPDU-reference-assignment.patch
patches.fixes/0001-iwiwifi-fix-bad-monitor-buffer-register-addresses.patch
patches.drivers/mt7601u-bump-supported-EEPROM-version.patch
+ patches.fixes/0001-cxgb4-cxgb4vf-Add-support-for-SGE-doorbell-queue-tim.patch
+ patches.fixes/0001-cxgb4-Add-capability-to-get-set-SGE-Doorbell-Queue-T.patch
patches.fixes/0001-cxgb4-Mask-out-interrupts-that-are-not-enabled.patch
patches.drivers/iwlwifi-mvm-fix-RSS-config-command.patch
patches.fixes/0001-iwlwifi-fix-send-hcmd-timeout-recovery-flow.patch
@@ -21115,6 +21186,8 @@
patches.drivers/sky2-Disable-MSI-on-Dell-Inspiron-1545-and-Gateway-P.patch
patches.fixes/0001-net-dsa-mv88e6xxx-handle-unknown-duplex-modes-gracef.patch
patches.fixes/0001-net-sysfs-Fix-mem-leak-in-netdev_register_kobject.patch
+ patches.suse/qmi_wwan-Add-support-for-Quectel-EG12-EM12.patch
+ patches.fixes/crypto-pcbc-remove-bogus-memcpy-s-with-src-dest.patch
patches.fixes/crypto-hash-set-CRYPTO_TFM_NEED_KEY-if-setkey-fails.patch
patches.fixes/crypto-brcm-Fix-some-set-but-not-used-warning.patch
patches.fixes/crypto-tgr192-fix-unaligned-memory-access.patch
@@ -21201,9 +21274,11 @@
patches.suse/powerpc-livepatch-small-cleanups-in-save_stack_trace_tsk_reliable.patch
patches.arch/powerpc-pseries-export-timebase-register-sample-in-l.patch
patches.arch/powerpc-Fix-32-bit-KVM-PR-lockup-and-host-crash-with.patch
+ patches.arch/powerpc-64s-Fix-logic-when-handling-unknown-CPU-feat.patch
patches.arch/powerpc-powernv-Don-t-reprogram-SLW-image-on-every-K.patch
patches.arch/powerpc-hugetlb-Handle-mmap_min_addr-correctly-in-ge.patch
patches.arch/powerpc-mm-hash-Handle-mmap_min_addr-correctly-in-ge.patch
+ patches.arch/powerpc-powernv-Make-opal-log-only-readable-by-root.patch
patches.arch/powerpc-powernv-ioda-Fix-locked_vm-counting-for-memo.patch
patches.arch/powerpc-kvm-Save-and-restore-host-AMR-IAMR-UAMOR.patch
patches.arch/powerpc-mm-Check-secondary-hash-page-table.patch
@@ -21298,10 +21373,16 @@
patches.drivers/tpm-Fix-some-name-collisions-with-drivers-char-tpm.h.patch
patches.fixes/tipc-fix-RDM-DGRAM-connect-regression.patch
patches.fixes/0001-ipv4-route-fail-early-when-inet-dev-is-missing.patch
+ patches.suse/net-hsr-fix-memory-leak-in-hsr_dev_finalize.patch
+ patches.suse/ravb-Decrease-TxFIFO-depth-of-Q3-and-Q2-to-one.patch
patches.drivers/enic-fix-build-warning-without-CONFIG_CPUMASK_OFFSTA.patch
+ patches.suse/net-hsr-fix-possible-crash-in-add_timer.patch
+ patches.suse/route-set-the-deleted-fnhe-fnhe_daddr-to-0-in-ip_del.patch
patches.fixes/0001-vxlan-Fix-GRO-cells-race-condition-between-receive-a.patch
patches.fixes/0001-tcp-handle-inet_csk_reqsk_queue_add-failures.patch
patches.fixes/bpf-fix-replace_map_fd_with_map_ptr-s-ldimm64-second.patch
+ patches.suse/rxrpc-Fix-client-call-queueing-waiting-for-channel.patch
+ patches.suse/net-x25-fix-use-after-free-in-x25_device_event.patch
patches.fixes/0001-vxlan-test-dev-flags-IFF_UP-before-calling-gro_cells.patch
patches.fixes/0001-gro_cells-make-sure-device-is-up-in-gro_cells_receiv.patch
patches.drivers/input-raspberrypi-ts-select-config_input_polldev.patch
@@ -21380,6 +21461,8 @@
patches.drivers/dmaengine-mv_xor-Use-correct-device-for-DMA-API.patch
patches.drivers/dmaengine-imx-dma-fix-warning-comparison-of-distinct.patch
patches.drivers/dmaengine-tegra-avoid-overflow-of-byte-tracking.patch
+ patches.suse/net-sit-fix-UBSAN-Undefined-behaviour-in-check_6rd.patch
+ patches.suse/net-x25-reset-state-in-x25_connect.patch
patches.fixes/0001-net-mlx5e-IPoIB-Fix-RX-checksum-statistics-update.patch
patches.fixes/0001-net-mlx4_core-Fix-reset-flow-when-in-command-polling.patch
patches.fixes/0001-net-mlx4_core-Fix-locking-in-SRIOV-mode-when-switchi.patch
@@ -21429,6 +21512,7 @@
patches.fixes/SMB3-Fix-SMB3.1.1-guest-mounts-to-Samba.patch
patches.drivers/thermal-bcm2835-Fix-crash-in-bcm2835_thermal_debugfs.patch
patches.arch/thermal-intel_powerclamp-fix-truncated-kthread-name.patch
+ patches.fixes/0001-futex-Ensure-that-futex-address-is-aligned-in-handle.patch
patches.fixes/ext4-fix-NULL-pointer-dereference-while-journal-is-a.patch
patches.fixes/ext4-fix-data-corruption-caused-by-unaligned-direct-.patch
patches.fixes/ext4-add-missing-brelse-in-add_new_gdb_meta_bg.patch
@@ -21443,6 +21527,7 @@
patches.fixes/NFSv4.1-don-t-free-interrupted-slot-on-open.patch
patches.fixes/NFS-Fix-a-typo-in-nfs_init_timeout_values.patch
patches.drivers/mISDN-hfcpci-Test-both-vendor-device-ID-for-Digium-H.patch
+ patches.suse/net-packet-Set-__GFP_NOWARN-upon-allocation-in-alloc.patch
patches.fixes/rhashtable-Still-do-rehash-when-we-get-EEXIST.patch
patches.fixes/bpf-do-not-restore-dst_reg-when-cur_state-is-freed.patch
patches.drm/0001-drm-rockchip-vop-reset-scale-mode-when-win-is-disabl.patch
@@ -21485,6 +21570,8 @@
patches.drivers/serial-sh-sci-Fix-setting-SCSCR_TIE-while-transferri.patch
patches.drivers/Disable-kgdboc-failed-by-echo-space-to-sys-module-kg.patch
patches.drivers/staging-rtl8712-uninitialized-memory-in-read_bbreg_h.patch
+ patches.drivers/staging-vt6655-Fix-interrupt-race-condition-on-devic.patch
+ patches.drivers/staging-vt6655-Remove-vif-check-from-vnt_interrupt.patch
patches.drivers/gpio-adnp-Fix-testing-wrong-value-in-adnp_gpio_direc.patch
patches.drivers/gpio-of-Fix-of_gpiochip_add-error-path.patch
patches.drivers/leds-pca9532-fix-a-potential-NULL-pointer-dereferenc.patch
@@ -21494,13 +21581,23 @@
patches.fixes/batman-adv-Reduce-tt_local-hash-refcnt-only-for-remo.patch
patches.fixes/batman-adv-Reduce-tt_global-hash-refcnt-only-for-rem.patch
patches.drivers/fm10k-Fix-a-potential-NULL-pointer-dereference.patch
+ patches.suse/kcm-switch-order-of-device-registration-to-fix-a-cra.patch
patches.drivers/ibmvnic-Fix-completion-structure-initialization.patch
patches.drm/drm-i915-gvt-do-not-deliver-a-workload-if-its-creati.patch
patches.drm/0003-drm-i915-gvt-do-not-let-pin-count-of-shadow-mm-go-ne.patch
+ patches.drivers/vfio-type1-limit-dma-mappings-per-container
patches.fixes/dm-disable-DISCARD-if-the-underlying-storage-no-long.patch
patches.fixes/mm-huge_memory.c-fix-modifying-of-page-protection-by-insert_pfn_pmd.patch
patches.drivers/tpm-Fix-the-type-of-the-return-value-in-calc_tpm2_ev.patch
+ patches.drivers/NFC-nci-Add-some-bounds-checking-in-nci_hci_cmd_rece.patch
patches.drivers/Bluetooth-btusb-request-wake-pin-with-NOAUTOEN.patch
+ patches.drivers/ALSA-seq-Fix-OOB-reads-from-strlcpy.patch
+ patches.drivers/ALSA-hda-Add-two-more-machines-to-the-power_save_bla.patch
+ patches.drivers/iommu-amd-set-exclusion-range-correctly
+
+ # davem/net
+ patches.drivers/ibmvnic-Enable-GRO.patch
+ patches.drivers/ibmvnic-Fix-netdev-feature-clobbering-during-a-reset.patch
# davem/net-next
patches.drivers/ibmvnic-Report-actual-backing-device-speed-and-duple.patch
@@ -21600,6 +21697,7 @@
patches.suse/0004-x86-enter-Create-macros-to-restrict-unrestrict-Indir.patch
patches.suse/0005-x86-enter-Use-IBRS-on-syscall-and-interrupts.patch
patches.suse/IBRS-forbid-shooting-in-foot.patch
+ patches.suse/do-not-default-to-ibrs-on-skl.patch
patches.suse/sched-do-not-re-read-h_load_next-during-hierarchical-load-calculation.patch
@@ -21663,9 +21761,6 @@
patches.kabi/0001-hwpoison-memory_hotplug-allow-hwpoisoned-pages-to-be-kabi.patch
- patches.fixes/vfs-Avoid-softlockups-in-drop_pagecache_sb.patch
- patches.fixes/mm-migrate-Make-buffer_migrate_page_norefs-actually-.patch
-
# bsc#1127731
patches.suse/mm-memory_hotplug-fix-scan_movable_pages-for-giganti.patch
@@ -22200,6 +22295,7 @@
patches.kabi/kabi-x86-speculation-fix-cpu_tlbstate-issue.patch
patches.kabi/hid-debug-kfifo-kabi-workaround.patch
patches.kabi/kabi-protect-vhost_log_write.patch
+ patches.kabi/kabi-restore-icmp_send.patch
patches.kabi/nvme-kABI-fixes-for-nvme_subsystem.patch
diff --git a/supported.conf b/supported.conf
index 5a14d345ef..6b140a45b4 100644
--- a/supported.conf
+++ b/supported.conf
@@ -1216,7 +1216,7 @@
drivers/mmc/host/bcm2835 # RPi3 (bsc#983145)
- drivers/mmc/host/cb710-mmc
drivers/mmc/host/dw_mmc
-+base drivers/mmc/host/dw_mmc-bluefield # bsc#1118752
+ drivers/mmc/host/dw_mmc-bluefield # bsc#1118752
- drivers/mmc/host/dw_mmc-exynos
drivers/mmc/host/dw_mmc-k3
drivers/mmc/host/dw_mmc-pci