SLE12-SP4-AZURE
author    Johannes Thumshirn <jthumshirn@suse.de>  2019-05-15 09:11:11 +0200
committer Johannes Thumshirn <jthumshirn@suse.de>  2019-05-15 09:11:11 +0200
commit    efaf05db39f2eb6582d53d0ab26255ad2b2d489a (patch)
tree      1f87892f1e7b1436c8c2c0e006592d12f3eb13d4
parent    8f3e76bcfc6832049fc4f5a329ab19aa36f7ec88 (diff)
parent    c190c41fb2e45114027544fa93d23be179c35bf0 (diff)

Merge remote-tracking branch 'origin/SLE15' into SLE12-SP4
Conflicts:
	blacklist.conf
Diffstat:
-rw-r--r--  blacklist.conf | 2
-rw-r--r--  patches.arch/intel_idle-add-support-for-Jacobsville.patch | 13
-rw-r--r--  patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch | 47
-rw-r--r--  patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch | 90
-rw-r--r--  patches.arch/powercap-intel_rapl-add-support-for-Jacobsville.patch | 6
-rw-r--r--  patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch | 4
-rw-r--r--  patches.arch/x86-cpu-Add-Atom-Tremont-Jacobsville.patch | 10
-rw-r--r--  patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch | 602
-rw-r--r--  patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch | 45
-rw-r--r--  patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch | 52
-rw-r--r--  patches.arch/x86-msr-index-cleanup-bit-defines.patch | 102
-rw-r--r--  patches.arch/x86-speculation-consolidate-cpu-whitelists.patch | 167
-rw-r--r--  patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch | 148
-rw-r--r--  patches.arch/x86-speculation-mds-add-bug_msbds_only.patch | 86
-rw-r--r--  patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch | 85
-rw-r--r--  patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch | 216
-rw-r--r--  patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch | 186
-rw-r--r--  patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch | 131
-rw-r--r--  patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch | 62
-rw-r--r--  patches.arch/x86-speculation-mds-add-smt-warning-message.patch | 50
-rw-r--r--  patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch | 120
-rw-r--r--  patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch | 192
-rw-r--r--  patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch | 224
-rw-r--r--  patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch | 49
-rw-r--r--  patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch | 45
-rw-r--r--  patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch | 84
-rw-r--r--  patches.arch/x86-speculation-support-enhanced-ibrs-on-future-cpus.patch | 12
-rw-r--r--  patches.drivers/iommu-vt-d-don-t-request-page-request-irq-under-dmar_global_lock | 101
-rw-r--r--  patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu | 42
-rw-r--r--  patches.drivers/iommu-vt-d-set-intel_iommu_gfx_mapped-correctly | 49
-rw-r--r--  patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch | 160
-rw-r--r--  patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch | 66
-rw-r--r--  patches.drivers/phy-sun4i-usb-Make-sure-to-disable-PHY0-passby-for-p.patch | 50
-rw-r--r--  patches.drivers/sc16is7xx-move-label-err_spi-to-correct-section.patch | 49
-rw-r--r--  patches.drivers/sc16is7xx-put-err_spi-and-err_i2c-into-correct-ifdef.patch | 45
-rw-r--r--  patches.drm/0004-drm-i915-gvt-Fix-incorrect-mask-of-mmio-0x22028-in-g.patch | 39
-rw-r--r--  patches.drm/0005-drm-meson-add-size-and-alignment-requirements-for-du.patch | 53
-rw-r--r--  patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch | 21
-rw-r--r--  patches.fixes/0001-dt-bindings-net-Fix-a-typo-in-the-phy-mode-list-for-.patch | 37
-rw-r--r--  patches.fixes/net-rds-force-to-destroy-connection-if-t_sock-is-NUL.patch | 143
-rw-r--r--  patches.fixes/nvme-multipath-avoid-crash-on-invalid-subsystem-cntl.patch | 10
-rw-r--r--  patches.fixes/vfio-mdev-Avoid-release-parent-reference-during-erro.patch | 40
-rw-r--r--  patches.fixes/vfio-mdev-Fix-aborting-mdev-child-device-removal-if-.patch | 77
-rw-r--r--  patches.fixes/vfio-pci-use-correct-format-characters.patch | 83
-rw-r--r--  patches.fixes/vhost-vsock-fix-reset-orphans-race-with-close-timeou.patch | 65
-rw-r--r--  patches.fixes/vsock-virtio-fix-kernel-panic-after-device-hot-unplu.patch | 103
-rw-r--r--  patches.fixes/vsock-virtio-fix-kernel-panic-from-virtio_transport_.patch | 109
-rw-r--r--  patches.fixes/vsock-virtio-reset-connected-sockets-on-device-remov.patch | 37
-rw-r--r--  patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch | 85
-rw-r--r--  patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch | 404
-rw-r--r--  patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch | 33
-rw-r--r--  patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch | 75
-rw-r--r--  patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch | 73
-rw-r--r--  patches.suse/0003-x86-idle-Control-Indirect-Branch-Speculation-in-idle.patch | 16
-rw-r--r--  patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch | 128
-rw-r--r--  patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch | 125
-rw-r--r--  patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch | 102
-rw-r--r--  patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch | 102
-rw-r--r--  patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch | 159
-rw-r--r--  patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch | 10
-rw-r--r--  patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch | 16
-rw-r--r--  patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch | 32
-rw-r--r--  patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch | 88
-rw-r--r--  series.conf | 53
64 files changed, 5603 insertions(+), 107 deletions(-)
diff --git a/blacklist.conf b/blacklist.conf
index dc1945355f..f3540b02d0 100644
--- a/blacklist.conf
+++ b/blacklist.conf
@@ -1114,3 +1114,5 @@ f58213637206e190453e3bd91f98f535566290a3 # regulator: missing regulator_lock() A
f7a621728a6a23bfd2c6ac4d3e42e1303aefde0f # regulator: missing regulator_lock() API in SLE15
8be64b6d87bd47d81753b60ddafe70102ebfd76b # regulator: missing regulator_lock() API in SLE15
401e7e88d4ef80188ffa07095ac00456f901b8c4 # base patch missing in SLE12-SP4
+98fdaaca9537b997062f1abc0aa87c61b50ce40a # Duplicate of fc89a38d99d4b1b33ca5b0e2329f5ddea02ecfb5: drm/i915/opregion: fix version check
+a0f52c3d357af218a9c1f7cd906ab70426176a1a # Duplicate of 16eb0f34cdf4cf04cd92762c7a79f98aa51e053f: drm/i915/opregion: rvda is relative from opregion base in opregion 2.1+
diff --git a/patches.arch/intel_idle-add-support-for-Jacobsville.patch b/patches.arch/intel_idle-add-support-for-Jacobsville.patch
index 81b2bc5dba..9e2a8e615b 100644
--- a/patches.arch/intel_idle-add-support-for-Jacobsville.patch
+++ b/patches.arch/intel_idle-add-support-for-Jacobsville.patch
@@ -13,21 +13,16 @@ Signed-off-by: Zhang Rui <rui.zhang@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Michal Suchanek <msuchanek@suse.de>
---
- drivers/idle/intel_idle.c | 1 +
+ drivers/idle/intel_idle.c | 1 +
1 file changed, 1 insertion(+)
-diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
-index 216d7ec88c0c..008eb4d58a86 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
-@@ -1099,6 +1099,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
+@@ -1099,6 +1099,7 @@ static const struct x86_cpu_id intel_idl
ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, idle_cpu_bxt),
- ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, idle_cpu_bxt),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT_X, idle_cpu_dnv),
+ ICPU(INTEL_FAM6_ATOM_TREMONT_X, idle_cpu_dnv),
{}
};
---
-2.20.1
-
diff --git a/patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch b/patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch
new file mode 100644
index 0000000000..bec39883f1
--- /dev/null
+++ b/patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch
@@ -0,0 +1,47 @@
+From: Eduardo Habkost <ehabkost@redhat.com>
+Date: Wed, 5 Dec 2018 17:19:56 -0200
+Subject: kvm: x86: Report STIBP on GET_SUPPORTED_CPUID
+Git-commit: d7b09c827a6cf291f66637a36f46928dd1423184
+Patch-mainline: v5.0-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Months ago, we have added code to allow direct access to MSR_IA32_SPEC_CTRL
+to the guest, which makes STIBP available to guests. This was implemented
+by commits d28b387fb74d ("KVM/VMX: Allow direct access to
+MSR_IA32_SPEC_CTRL") and b2ac58f90540 ("KVM/SVM: Allow direct access to
+MSR_IA32_SPEC_CTRL").
+
+However, we never updated GET_SUPPORTED_CPUID to let userspace know that
+STIBP can be enabled in CPUID. Fix that by updating
+kvm_cpuid_8000_0008_ebx_x86_features and kvm_cpuid_7_0_edx_x86_features.
+
+Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kvm/cpuid.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -367,7 +367,8 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+- F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | F(AMD_SSB_NO);
++ F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
++ F(AMD_SSB_NO) | F(AMD_STIBP);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_cpuid_C000_0001_edx_x86_features =
+@@ -395,7 +396,7 @@ static inline int __do_cpuid_ent(struct
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
+- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
++ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
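
[Editor's note, not part of the patch: the effect of this change can be observed from user space with the KVM_GET_SUPPORTED_CPUID ioctl. A minimal sketch, assuming a host with /dev/kvm and the documented bit positions (Intel STIBP is CPUID.(EAX=7,ECX=0):EDX[27], AMD STIBP is CPUID.0x80000008:EBX[15]):]

    /* Sketch: query KVM's supported-CPUID table for the STIBP bits. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDONLY);
        if (kvm < 0) {
            perror("open /dev/kvm");
            return 1;
        }

        /* Room for 256 entries; KVM trims ->nent to what it filled in. */
        struct kvm_cpuid2 *c = calloc(1, sizeof(*c) +
                                      256 * sizeof(struct kvm_cpuid_entry2));
        if (!c)
            return 1;
        c->nent = 256;
        if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, c) < 0) {
            perror("KVM_GET_SUPPORTED_CPUID");
            return 1;
        }

        for (unsigned int i = 0; i < c->nent; i++) {
            struct kvm_cpuid_entry2 *e = &c->entries[i];

            if (e->function == 7 && e->index == 0)
                printf("CPUID 7.0 EDX[27] STIBP: %u\n", (e->edx >> 27) & 1);
            if (e->function == 0x80000008)
                printf("CPUID 0x80000008 EBX[15] AMD_STIBP: %u\n",
                       (e->ebx >> 15) & 1);
        }
        return 0;
    }

[With this patch applied, both bits should read 1 on hosts whose microcode exposes STIBP.]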
diff --git a/patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch b/patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch
new file mode 100644
index 0000000000..e431bdf60d
--- /dev/null
+++ b/patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch
@@ -0,0 +1,90 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 19 Jun 2018 13:53:08 +0100
+Subject: locking/atomics, asm-generic: Move some macros from <linux/bitops.h>
+ to a new <linux/bits.h> file
+Git-commit: 8bd9cb51daac89337295b6f037b0486911e1b408
+Patch-mainline: v4.19-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+In preparation for implementing the asm-generic atomic bitops in terms
+of atomic_long_*(), we need to prevent <asm/atomic.h> implementations from
+pulling in <linux/bitops.h>. A common reason for this include is for the
+BITS_PER_BYTE definition, so move this and some other BIT() and masking
+macros into a new header file, <linux/bits.h>.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: yamada.masahiro@socionext.com
+Link: https://lore.kernel.org/lkml/1529412794-17720-4-git-send-email-will.deacon@arm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ include/linux/bitops.h | 21 +--------------------
+ include/linux/bits.h | 26 ++++++++++++++++++++++++++
+ 2 files changed, 27 insertions(+), 20 deletions(-)
+
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -1,28 +1,9 @@
+ #ifndef _LINUX_BITOPS_H
+ #define _LINUX_BITOPS_H
+ #include <asm/types.h>
++#include <linux/bits.h>
+
+-#ifdef __KERNEL__
+-#define BIT(nr) (1UL << (nr))
+-#define BIT_ULL(nr) (1ULL << (nr))
+-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+-#define BITS_PER_BYTE 8
+ #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+-#endif
+-
+-/*
+- * Create a contiguous bitmask starting at bit position @l and ending at
+- * position @h. For example
+- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+- */
+-#define GENMASK(h, l) \
+- (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+-
+-#define GENMASK_ULL(h, l) \
+- (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+ extern unsigned int __sw_hweight8(unsigned int w);
+ extern unsigned int __sw_hweight16(unsigned int w);
+--- /dev/null
++++ b/include/linux/bits.h
+@@ -0,0 +1,26 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __LINUX_BITS_H
++#define __LINUX_BITS_H
++#include <asm/bitsperlong.h>
++
++#define BIT(nr) (1UL << (nr))
++#define BIT_ULL(nr) (1ULL << (nr))
++#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
++#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
++#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
++#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
++#define BITS_PER_BYTE 8
++
++/*
++ * Create a contiguous bitmask starting at bit position @l and ending at
++ * position @h. For example
++ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
++ */
++#define GENMASK(h, l) \
++ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
++
++#define GENMASK_ULL(h, l) \
++ (((~0ULL) - (1ULL << (l)) + 1) & \
++ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
++
++#endif /* __LINUX_BITS_H */
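
[Editor's note: besides moving the macros, the patch switches GENMASK()/GENMASK_ULL() from the (~0UL << (l)) form to the arithmetic (~0UL - (1UL << (l)) + 1) form; both compute the same mask. A stand-alone user-space check of the comment's example, not taken from the patch:]

    #include <stdio.h>

    #define BITS_PER_LONG_LONG 64
    #define GENMASK_ULL(h, l) \
        (((~0ULL) - (1ULL << (l)) + 1) & \
         (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

    int main(void)
    {
        /* Bits 21..39 set, everything else clear. */
        printf("GENMASK_ULL(39, 21) = 0x%016llx\n", GENMASK_ULL(39, 21));
        /* Expected: 0x000000ffffe00000, matching the comment in bits.h. */
        return 0;
    }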
diff --git a/patches.arch/powercap-intel_rapl-add-support-for-Jacobsville.patch b/patches.arch/powercap-intel_rapl-add-support-for-Jacobsville.patch
index 5a3982f1bf..5a1ed53480 100644
--- a/patches.arch/powercap-intel_rapl-add-support-for-Jacobsville.patch
+++ b/patches.arch/powercap-intel_rapl-add-support-for-Jacobsville.patch
@@ -13,15 +13,15 @@ Signed-off-by: Zhang Rui <rui.zhang@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Michal Suchanek <msuchanek@suse.de>
---
- drivers/powercap/intel_rapl.c | 1 +
+ drivers/powercap/intel_rapl.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1166,6 +1166,7 @@ static const struct x86_cpu_id rapl_ids[
RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_X, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_ATOM_TREMONT_X, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
diff --git a/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch b/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch
index 2114216f45..ea0130ee81 100644
--- a/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch
+++ b/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch
@@ -44,7 +44,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
-@@ -948,7 +948,8 @@ static void __init cpu_set_bug_bits(stru
+@@ -946,7 +946,8 @@ static void __init cpu_set_bug_bits(stru
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
@@ -53,7 +53,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
- if (x86_match_cpu(cpu_no_speculation))
+ if (x86_match_cpu(cpu_no_meltdown))
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -367,7 +367,7 @@ static inline int __do_cpuid_ent(struct
diff --git a/patches.arch/x86-cpu-Add-Atom-Tremont-Jacobsville.patch b/patches.arch/x86-cpu-Add-Atom-Tremont-Jacobsville.patch
index 5788f25bbc..243d8cc12c 100644
--- a/patches.arch/x86-cpu-Add-Atom-Tremont-Jacobsville.patch
+++ b/patches.arch/x86-cpu-Add-Atom-Tremont-Jacobsville.patch
@@ -31,7 +31,7 @@ Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190125195902.17109-4-tony.luck@intel.com
Acked-by: Michal Suchanek <msuchanek@suse.de>
---
- arch/x86/include/asm/intel-family.h | 3 ++-
+ arch/x86/include/asm/intel-family.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
--- a/arch/x86/include/asm/intel-family.h
@@ -45,10 +45,10 @@ Acked-by: Michal Suchanek <msuchanek@suse.de>
*
* Things ending in "2" are usually because we have no better
* name for them. There's no processor called "SILVERMONT2".
-@@ -63,6 +63,7 @@
- #define INTEL_FAM6_ATOM_GOLDMONT 0x5C
- #define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
- #define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A
+@@ -67,6 +67,7 @@
+ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
+ #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
+ #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
+#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
/* Xeon Phi */
diff --git a/patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch b/patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch
new file mode 100644
index 0000000000..5cdecdb444
--- /dev/null
+++ b/patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch
@@ -0,0 +1,602 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 7 Aug 2018 10:17:27 -0700
+Subject: x86/cpu: Sanitize FAM6_ATOM naming
+Git-commit: f2c4db1bd80720cd8cb2a5aa220d9bc9f374f04e
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+commit f2c4db1bd80720cd8cb2a5aa220d9bc9f374f04e upstream
+
+Going primarily by:
+
+ https://en.wikipedia.org/wiki/List_of_Intel_Atom_microprocessors
+
+with additional information gleaned from other related pages; notably:
+
+ - Bonnell shrink was called Saltwell
+ - Moorefield is the Merriefield refresh which makes it Airmont
+
+The general naming scheme is: FAM6_ATOM_UARCH_SOCTYPE
+
+ for i in `git grep -l FAM6_ATOM` ; do
+ sed -i -e 's/ATOM_PINEVIEW/ATOM_BONNELL/g' \
+ -e 's/ATOM_LINCROFT/ATOM_BONNELL_MID/' \
+ -e 's/ATOM_PENWELL/ATOM_SALTWELL_MID/g' \
+ -e 's/ATOM_CLOVERVIEW/ATOM_SALTWELL_TABLET/g' \
+ -e 's/ATOM_CEDARVIEW/ATOM_SALTWELL/g' \
+ -e 's/ATOM_SILVERMONT1/ATOM_SILVERMONT/g' \
+ -e 's/ATOM_SILVERMONT2/ATOM_SILVERMONT_X/g' \
+ -e 's/ATOM_MERRIFIELD/ATOM_SILVERMONT_MID/g' \
+ -e 's/ATOM_MOOREFIELD/ATOM_AIRMONT_MID/g' \
+ -e 's/ATOM_DENVERTON/ATOM_GOLDMONT_X/g' \
+ -e 's/ATOM_GEMINI_LAKE/ATOM_GOLDMONT_PLUS/g' ${i}
+ done
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: dave.hansen@linux.intel.com
+Cc: len.brown@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[ 4.14.y speck backport, commit id there: f0fae1c931dd3a49cd42855836fc3f075960d4be ]
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/events/intel/core.c | 18 +++----
+ arch/x86/events/intel/cstate.c | 4 -
+ arch/x86/events/msr.c | 4 -
+ arch/x86/include/asm/intel-family.h | 30 ++++++-----
+ arch/x86/kernel/cpu/common.c | 28 +++++-----
+ arch/x86/kernel/tsc.c | 2
+ arch/x86/platform/atom/punit_atom_debug.c | 4 -
+ arch/x86/platform/intel-mid/device_libs/platform_bt.c | 2
+ drivers/acpi/acpi_lpss.c | 2
+ drivers/acpi/x86/utils.c | 2
+ drivers/cpufreq/intel_pstate.c | 4 -
+ drivers/edac/pnd2_edac.c | 2
+ drivers/idle/intel_idle.c | 18 +++----
+ drivers/mmc/host/sdhci-acpi.c | 2
+ drivers/pci/pci-mid.c | 4 -
+ drivers/platform/x86/intel_int0002_vgpio.c | 2
+ drivers/platform/x86/intel_mid_powerbtn.c | 4 -
+ drivers/powercap/intel_rapl.c | 10 +--
+ drivers/thermal/intel_soc_dts_thermal.c | 2
+ tools/power/x86/turbostat/turbostat.c | 46 +++++++++---------
+ 20 files changed, 98 insertions(+), 92 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3724,11 +3724,11 @@ __init int intel_pmu_init(void)
+ pr_cont("Nehalem events, ");
+ break;
+
+- case INTEL_FAM6_ATOM_PINEVIEW:
+- case INTEL_FAM6_ATOM_LINCROFT:
+- case INTEL_FAM6_ATOM_PENWELL:
+- case INTEL_FAM6_ATOM_CLOVERVIEW:
+- case INTEL_FAM6_ATOM_CEDARVIEW:
++ case INTEL_FAM6_ATOM_BONNELL:
++ case INTEL_FAM6_ATOM_BONNELL_MID:
++ case INTEL_FAM6_ATOM_SALTWELL:
++ case INTEL_FAM6_ATOM_SALTWELL_MID:
++ case INTEL_FAM6_ATOM_SALTWELL_TABLET:
+ memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+@@ -3740,9 +3740,11 @@ __init int intel_pmu_init(void)
+ pr_cont("Atom events, ");
+ break;
+
+- case INTEL_FAM6_ATOM_SILVERMONT1:
+- case INTEL_FAM6_ATOM_SILVERMONT2:
++ case INTEL_FAM6_ATOM_SILVERMONT:
++ case INTEL_FAM6_ATOM_SILVERMONT_X:
++ case INTEL_FAM6_ATOM_SILVERMONT_MID:
+ case INTEL_FAM6_ATOM_AIRMONT:
++ case INTEL_FAM6_ATOM_AIRMONT_MID:
+ memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
+@@ -3759,7 +3761,7 @@ __init int intel_pmu_init(void)
+ break;
+
+ case INTEL_FAM6_ATOM_GOLDMONT:
+- case INTEL_FAM6_ATOM_DENVERTON:
++ case INTEL_FAM6_ATOM_GOLDMONT_X:
+ memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -531,8 +531,8 @@ static const struct x86_cpu_id intel_cst
+
+ X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),
+
+- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
+- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
++ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates),
++ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT, slm_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE, snb_cstates),
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -61,8 +61,8 @@ static bool test_intel(int idx)
+ case INTEL_FAM6_BROADWELL_GT3E:
+ case INTEL_FAM6_BROADWELL_X:
+
+- case INTEL_FAM6_ATOM_SILVERMONT1:
+- case INTEL_FAM6_ATOM_SILVERMONT2:
++ case INTEL_FAM6_ATOM_SILVERMONT:
++ case INTEL_FAM6_ATOM_SILVERMONT_X:
+ case INTEL_FAM6_ATOM_AIRMONT:
+ if (idx == PERF_MSR_SMI)
+ return true;
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -50,19 +50,23 @@
+
+ /* "Small Core" Processors (Atom) */
+
+-#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
+-#define INTEL_FAM6_ATOM_LINCROFT 0x26
+-#define INTEL_FAM6_ATOM_PENWELL 0x27
+-#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
+-#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
+-#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
+-#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
+-#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
+-#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
+-#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */
+-#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
+-#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
+-#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A
++#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
++#define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */
++
++#define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */
++#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */
++#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */
++
++#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */
++#define INTEL_FAM6_ATOM_SILVERMONT_X 0x4D /* Avaton, Rangely */
++#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */
++
++#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */
++#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */
++
++#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
++#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
++#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
+
+ /* Xeon Phi */
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -899,11 +899,11 @@ static void identify_cpu_without_cpuid(s
+ }
+
+ static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY },
+ { X86_VENDOR_CENTAUR, 5 },
+ { X86_VENDOR_INTEL, 5 },
+ { X86_VENDOR_NSC, 5 },
+@@ -918,10 +918,10 @@ static const __initconst struct x86_cpu_
+
+ /* Only list CPUs which speculate but are non susceptible to SSB */
+ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+@@ -934,14 +934,14 @@ static const __initconst struct x86_cpu_
+
+ static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+ /* in addition to cpu_no_speculation */
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+ {}
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -620,7 +620,7 @@ unsigned long native_calibrate_tsc(void)
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
+ crystal_khz = 24000; /* 24.0 MHz */
+ break;
+- case INTEL_FAM6_ATOM_DENVERTON:
++ case INTEL_FAM6_ATOM_GOLDMONT_X:
+ crystal_khz = 25000; /* 25.0 MHz */
+ break;
+ case INTEL_FAM6_ATOM_GOLDMONT:
+--- a/arch/x86/platform/atom/punit_atom_debug.c
++++ b/arch/x86/platform/atom/punit_atom_debug.c
+@@ -154,8 +154,8 @@ static void punit_dbgfs_unregister(void)
+ (kernel_ulong_t)&drv_data }
+
+ static const struct x86_cpu_id intel_punit_cpu_ids[] = {
+- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
+- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, punit_device_tng),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT, punit_device_byt),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, punit_device_tng),
+ ICPU(INTEL_FAM6_ATOM_AIRMONT, punit_device_cht),
+ {}
+ };
+--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
++++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+@@ -68,7 +68,7 @@ static struct bt_sfi_data tng_bt_sfi_dat
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
+
+ static const struct x86_cpu_id bt_sfi_cpu_ids[] = {
+- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, tng_bt_sfi_data),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, tng_bt_sfi_data),
+ {}
+ };
+
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -291,7 +291,7 @@ static const struct lpss_device_desc bsw
+ #define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+ static const struct x86_cpu_id lpss_cpu_ids[] = {
+- ICPU(INTEL_FAM6_ATOM_SILVERMONT1), /* Valleyview, Bay Trail */
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */
+ ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
+ {}
+ };
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -54,7 +54,7 @@ static const struct always_present_id al
+ * Bay / Cherry Trail PWM directly poked by GPU driver in win10,
+ * but Linux uses a separate PWM driver, harmless if not used.
+ */
+- ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1), {}),
++ ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}),
+ ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
+ /*
+ * The INT0002 device is necessary to clear wakeup interrupt sources
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2019,7 +2019,7 @@ static const struct pstate_funcs knl_fun
+ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
+ ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs),
+ ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs),
+- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs),
+ ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs),
+ ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs),
+ ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs),
+@@ -2036,7 +2036,7 @@ static const struct x86_cpu_id intel_pst
+ ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
+ ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
+- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs),
++ ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
+ {}
+ };
+--- a/drivers/edac/pnd2_edac.c
++++ b/drivers/edac/pnd2_edac.c
+@@ -1541,7 +1541,7 @@ static struct dunit_ops dnv_ops = {
+
+ static const struct x86_cpu_id pnd2_cpuids[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops },
+ { }
+ };
+ MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -1069,14 +1069,14 @@ static const struct x86_cpu_id intel_idl
+ ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem),
+- ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom),
+- ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft),
++ ICPU(INTEL_FAM6_ATOM_BONNELL, idle_cpu_atom),
++ ICPU(INTEL_FAM6_ATOM_BONNELL_MID, idle_cpu_lincroft),
+ ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb),
+ ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
+- ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
+- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
+- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier),
++ ICPU(INTEL_FAM6_ATOM_SALTWELL, idle_cpu_atom),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT, idle_cpu_byt),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, idle_cpu_tangier),
+ ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
+ ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
+ ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
+@@ -1084,7 +1084,7 @@ static const struct x86_cpu_id intel_idl
+ ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw),
+- ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_X, idle_cpu_avn),
+ ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw),
+@@ -1097,8 +1097,8 @@ static const struct x86_cpu_id intel_idl
+ ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
+ ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
+- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, idle_cpu_bxt),
+- ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
++ ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, idle_cpu_bxt),
++ ICPU(INTEL_FAM6_ATOM_GOLDMONT_X, idle_cpu_dnv),
+ {}
+ };
+
+@@ -1310,7 +1310,7 @@ static void intel_idle_state_table_updat
+ ivt_idle_state_table_update();
+ break;
+ case INTEL_FAM6_ATOM_GOLDMONT:
+- case INTEL_FAM6_ATOM_GEMINI_LAKE:
++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ bxt_idle_state_table_update();
+ break;
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -128,7 +128,7 @@ static const struct sdhci_acpi_chip sdhc
+ static bool sdhci_acpi_byt(void)
+ {
+ static const struct x86_cpu_id byt[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
+ {}
+ };
+
+--- a/drivers/pci/pci-mid.c
++++ b/drivers/pci/pci-mid.c
+@@ -71,8 +71,8 @@ static const struct pci_platform_pm_ops
+ * arch/x86/platform/intel-mid/pwr.c.
+ */
+ static const struct x86_cpu_id lpss_cpu_ids[] = {
+- ICPU(INTEL_FAM6_ATOM_PENWELL),
+- ICPU(INTEL_FAM6_ATOM_MERRIFIELD),
++ ICPU(INTEL_FAM6_ATOM_SALTWELL_MID),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID),
+ {}
+ };
+
+--- a/drivers/platform/x86/intel_int0002_vgpio.c
++++ b/drivers/platform/x86/intel_int0002_vgpio.c
+@@ -60,7 +60,7 @@ static const struct x86_cpu_id int0002_c
+ /*
+ * Limit ourselves to Cherry Trail for now, until testing shows we
+ * need to handle the INT0002 device on Baytrail too.
+- * ICPU(INTEL_FAM6_ATOM_SILVERMONT1), * Valleyview, Bay Trail *
++ * ICPU(INTEL_FAM6_ATOM_SILVERMONT), * Valleyview, Bay Trail *
+ */
+ ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
+ {}
+--- a/drivers/platform/x86/intel_mid_powerbtn.c
++++ b/drivers/platform/x86/intel_mid_powerbtn.c
+@@ -125,8 +125,8 @@ static struct mid_pb_ddata mrfld_ddata =
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
+
+ static const struct x86_cpu_id mid_pb_cpu_ids[] = {
+- ICPU(INTEL_FAM6_ATOM_PENWELL, mfld_ddata),
+- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, mrfld_ddata),
++ ICPU(INTEL_FAM6_ATOM_SALTWELL_MID, mfld_ddata),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, mrfld_ddata),
+ {}
+ };
+
+--- a/drivers/powercap/intel_rapl.c
++++ b/drivers/powercap/intel_rapl.c
+@@ -1159,13 +1159,13 @@ static const struct x86_cpu_id rapl_ids[
+ RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core),
+
+- RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt),
++ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT, rapl_defaults_byt),
+ RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht),
+- RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng),
+- RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann),
++ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT_MID,rapl_defaults_tng),
++ RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT_MID, rapl_defaults_ann),
+ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
+- RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, rapl_defaults_core),
+- RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
++ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, rapl_defaults_core),
++ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_X, rapl_defaults_core),
+
+ RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
+ RAPL_CPU(INTEL_FAM6_XEON_PHI_KNM, rapl_defaults_hsw_server),
+--- a/drivers/thermal/intel_soc_dts_thermal.c
++++ b/drivers/thermal/intel_soc_dts_thermal.c
+@@ -43,7 +43,7 @@ static irqreturn_t soc_irq_thread_fn(int
+ }
+
+ static const struct x86_cpu_id soc_thermal_ids[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0,
+ BYT_SOC_DTS_APIC_IRQ},
+ {}
+ };
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -1839,7 +1839,7 @@ int has_turbo_ratio_group_limits(int fam
+ switch (model) {
+ case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_SKYLAKE_X:
+- case INTEL_FAM6_ATOM_DENVERTON:
++ case INTEL_FAM6_ATOM_GOLDMONT_X:
+ return 1;
+ }
+ return 0;
+@@ -2701,9 +2701,9 @@ int probe_nhm_msrs(unsigned int family,
+ pkg_cstate_limits = skx_pkg_cstate_limits;
+ has_misc_feature_control = 1;
+ break;
+- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
++ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
+ no_MSR_MISC_PWR_MGMT = 1;
+- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
++ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */
+ pkg_cstate_limits = slv_pkg_cstate_limits;
+ break;
+ case INTEL_FAM6_ATOM_AIRMONT: /* AMT */
+@@ -2715,8 +2715,8 @@ int probe_nhm_msrs(unsigned int family,
+ pkg_cstate_limits = phi_pkg_cstate_limits;
+ break;
+ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
+- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
++ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
+ pkg_cstate_limits = bxt_pkg_cstate_limits;
+ break;
+ default:
+@@ -2745,9 +2745,9 @@ int has_slv_msrs(unsigned int family, un
+ return 0;
+
+ switch (model) {
+- case INTEL_FAM6_ATOM_SILVERMONT1:
+- case INTEL_FAM6_ATOM_MERRIFIELD:
+- case INTEL_FAM6_ATOM_MOOREFIELD:
++ case INTEL_FAM6_ATOM_SILVERMONT:
++ case INTEL_FAM6_ATOM_SILVERMONT_MID:
++ case INTEL_FAM6_ATOM_AIRMONT_MID:
+ return 1;
+ }
+ return 0;
+@@ -2759,7 +2759,7 @@ int is_dnv(unsigned int family, unsigned
+ return 0;
+
+ switch (model) {
+- case INTEL_FAM6_ATOM_DENVERTON:
++ case INTEL_FAM6_ATOM_GOLDMONT_X:
+ return 1;
+ }
+ return 0;
+@@ -3275,8 +3275,8 @@ double get_tdp(unsigned int model)
+ return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
+
+ switch (model) {
+- case INTEL_FAM6_ATOM_SILVERMONT1:
+- case INTEL_FAM6_ATOM_SILVERMONT2:
++ case INTEL_FAM6_ATOM_SILVERMONT:
++ case INTEL_FAM6_ATOM_SILVERMONT_X:
+ return 30.0;
+ default:
+ return 135.0;
+@@ -3342,7 +3342,7 @@ void rapl_probe(unsigned int family, uns
+ }
+ break;
+ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
+- case INTEL_FAM6_ATOM_GEMINI_LAKE:
++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
+ if (rapl_joules)
+ BIC_PRESENT(BIC_Pkg_J);
+@@ -3400,8 +3400,8 @@ void rapl_probe(unsigned int family, uns
+ BIC_PRESENT(BIC_RAMWatt);
+ }
+ break;
+- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
+- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
++ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
++ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */
+ do_rapl = RAPL_PKG | RAPL_CORES;
+ if (rapl_joules) {
+ BIC_PRESENT(BIC_Pkg_J);
+@@ -3411,7 +3411,7 @@ void rapl_probe(unsigned int family, uns
+ BIC_PRESENT(BIC_CorWatt);
+ }
+ break;
+- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
++ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
+ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS;
+ BIC_PRESENT(BIC_PKG__);
+ BIC_PRESENT(BIC_RAM__);
+@@ -3434,7 +3434,7 @@ void rapl_probe(unsigned int family, uns
+ return;
+
+ rapl_power_units = 1.0 / (1 << (msr & 0xF));
+- if (model == INTEL_FAM6_ATOM_SILVERMONT1)
++ if (model == INTEL_FAM6_ATOM_SILVERMONT)
+ rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
+ else
+ rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
+@@ -3684,8 +3684,8 @@ int has_snb_msrs(unsigned int family, un
+ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
+ case INTEL_FAM6_SKYLAKE_X: /* SKX */
+ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
+- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
++ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
+ return 1;
+ }
+ return 0;
+@@ -3716,7 +3716,7 @@ int has_hsw_msrs(unsigned int family, un
+ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
+ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
+ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
+- case INTEL_FAM6_ATOM_GEMINI_LAKE:
++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ return 1;
+ }
+ return 0;
+@@ -3750,8 +3750,8 @@ int is_slm(unsigned int family, unsigned
+ if (!genuine_intel)
+ return 0;
+ switch (model) {
+- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
+- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
++ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
++ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */
+ return 1;
+ }
+ return 0;
+@@ -4106,11 +4106,11 @@ void process_cpuid()
+ crystal_hz = 24000000; /* 24.0 MHz */
+ break;
+ case INTEL_FAM6_SKYLAKE_X: /* SKX */
+- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
++ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
+ crystal_hz = 25000000; /* 25.0 MHz */
+ break;
+ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
+- case INTEL_FAM6_ATOM_GEMINI_LAKE:
++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ crystal_hz = 19200000; /* 19.2 MHz */
+ break;
+ default:
diff --git a/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch b/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
new file mode 100644
index 0000000000..ef439ddf46
--- /dev/null
+++ b/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
@@ -0,0 +1,45 @@
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 18 Jan 2019 16:50:23 -0800
+Subject: x86/kvm: Expose X86_FEATURE_MD_CLEAR to guests
+Git-commit: 6c4dbbd14730c43f4ed808a9c42ca41625925c22
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+X86_FEATURE_MD_CLEAR is a new CPUID bit which is set when microcode
+provides the mechanism to invoke a flush of various exploitable CPU buffers
+by invoking the VERW instruction.
+
+Hand it through to guests so they can adjust their mitigations.
+
+This also requires corresponding qemu changes, which are available
+separately.
+
+[ tglx: Massaged changelog ]
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kvm/cpuid.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index c07958b59f50..39501e7afdb4 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -410,7 +410,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
+- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP);
++ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
++ F(MD_CLEAR);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
+
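[Editor's note: whether a given host itself enumerates the new bit can be checked directly; a minimal sketch assuming GCC/clang's <cpuid.h> (MD_CLEAR is CPUID.(EAX=7,ECX=0):EDX[10]):]

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
            return 1;   /* leaf 7 not supported */

        printf("MD_CLEAR (VERW clears CPU buffers): %s\n",
               (edx >> 10) & 1 ? "enumerated" : "not enumerated");
        return 0;
    }
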
diff --git a/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch b/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
new file mode 100644
index 0000000000..72dc41269e
--- /dev/null
+++ b/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
@@ -0,0 +1,52 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 27 Feb 2019 12:48:14 +0100
+Subject: x86/kvm/vmx: Add MDS protection when L1D Flush is not active
+Git-commit: 650b68a0622f933444a6d66936abb3103029413b
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+CPUs which are affected by L1TF and MDS mitigate MDS with the L1D Flush on
+VMENTER when updated microcode is installed.
+
+If a CPU is not affected by L1TF or if the L1D Flush is not in use, then
+MDS mitigation needs to be invoked explicitly.
+
+For these cases, follow the host mitigation state and invoke the MDS
+mitigation before VMENTER.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 1 +
+ arch/x86/kvm/vmx.c | 3 +++
+ 2 files changed, 4 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -62,6 +62,7 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always
+
+ /* Control MDS CPU buffer clear before returning to user space */
+ DEFINE_STATIC_KEY_FALSE(mds_user_clear);
++EXPORT_SYMBOL_GPL(mds_user_clear);
+
+ void __init check_bugs(void)
+ {
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9684,8 +9684,11 @@ static void __noclone vmx_vcpu_run(struc
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
+
++ /* L1D Flush includes CPU buffer clear to mitigate MDS */
+ if (static_branch_unlikely(&vmx_l1d_should_flush))
+ vmx_l1d_flush(vcpu);
++ else if (static_branch_unlikely(&mds_user_clear))
++ mds_clear_cpu_buffers();
+
+ asm(
+ /* Store host registers */
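
[Editor's note: mds_clear_cpu_buffers() itself is introduced by x86-speculation-mds-add-mds_clear_cpu_buffers.patch in this same series. Paraphrased, it boils down to a VERW on a valid data-segment selector; the 0x18 below assumes the x86-64 __KERNEL_DS and is only an illustration:]

    static inline void mds_clear_cpu_buffers_sketch(void)
    {
        /* Any valid, writable selector works; the kernel uses __KERNEL_DS. */
        static const unsigned short ds = 0x18;

        /*
         * The microcode-based buffer clear is keyed to the memory-operand
         * form of VERW, so this must not be converted to the register form.
         */
        asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
    }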
diff --git a/patches.arch/x86-msr-index-cleanup-bit-defines.patch b/patches.arch/x86-msr-index-cleanup-bit-defines.patch
new file mode 100644
index 0000000000..4fed2b84e7
--- /dev/null
+++ b/patches.arch/x86-msr-index-cleanup-bit-defines.patch
@@ -0,0 +1,102 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 21 Feb 2019 12:36:50 +0100
+Subject: x86/msr-index: Cleanup bit defines
+Git-commit: d8eabc37310a92df40d07c5a8afc53cebf996716
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Greg pointed out that speculation related bit defines are using (1 << N)
+format instead of BIT(N). Aside of that (1 << N) is wrong as it should use
+1UL at least.
+
+Clean it up.
+
+[ Josh Poimboeuf: Fix tools build ]
+
+Reported-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/msr-index.h | 34 ++++++++++++++++++----------------
+ tools/power/x86/turbostat/Makefile | 2 +-
+ 2 files changed, 19 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -1,6 +1,8 @@
+ #ifndef _ASM_X86_MSR_INDEX_H
+ #define _ASM_X86_MSR_INDEX_H
+
++#include <linux/bits.h>
++
+ /*
+ * CPU model specific register (MSR) numbers.
+ *
+@@ -39,14 +41,14 @@
+ /* Intel MSRs. Some also available on other CPUs */
+
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+-#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
++#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
+-#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
++#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
+ #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
+-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
++#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+-#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
++#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
+
+ #define MSR_PPIN_CTL 0x0000004e
+ #define MSR_PPIN 0x0000004f
+@@ -68,20 +70,20 @@
+ #define MSR_MTRRcap 0x000000fe
+
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+-#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+-#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+-#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */
+-#define ARCH_CAP_SSB_NO (1 << 4) /*
+- * Not susceptible to Speculative Store Bypass
+- * attack, so no Speculative Store Bypass
+- * control required.
+- */
++#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
++#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
++#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
++#define ARCH_CAP_SSB_NO BIT(4) /*
++ * Not susceptible to Speculative Store Bypass
++ * attack, so no Speculative Store Bypass
++ * control required.
++ */
+
+ #define MSR_IA32_FLUSH_CMD 0x0000010b
+-#define L1D_FLUSH (1 << 0) /*
+- * Writeback and invalidate the
+- * L1 data cache.
+- */
++#define L1D_FLUSH BIT(0) /*
++ * Writeback and invalidate the
++ * L1 data cache.
++ */
+
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+ #define MSR_IA32_BBL_CR_CTL3 0x0000011e
+--- a/tools/power/x86/turbostat/Makefile
++++ b/tools/power/x86/turbostat/Makefile
+@@ -8,7 +8,7 @@ ifeq ("$(origin O)", "command line")
+ endif
+
+ turbostat : turbostat.c
+-CFLAGS += -Wall
++override CFLAGS += -Wall -I../../../include
+ CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
+ CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
+
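[Editor's note: the "1UL at least" point is easy to demonstrate. A plain (1 << N) is a signed int, so shifting into bit 31 lands in the sign bit and the value then sign-extends when widened, which BIT(N) avoids. A stand-alone sketch, not from the patch:]

    #include <stdio.h>

    #define BIT(nr) (1UL << (nr))

    int main(void)
    {
        /*
         * (1 << 31) shifts into the sign bit of int (undefined in strict C;
         * common compilers yield INT_MIN), and the negative value then
         * sign-extends when widened to 64 bits.
         */
        unsigned long long bad  = (1 << 31);  /* 0xffffffff80000000 in practice */
        unsigned long long good = BIT(31);    /* 0x0000000080000000 on LP64 */

        printf("(1 << 31) -> %#llx\n", bad);
        printf("BIT(31)   -> %#llx\n", good);
        return 0;
    }
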
diff --git a/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch b/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
new file mode 100644
index 0000000000..a8953a02a2
--- /dev/null
+++ b/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
@@ -0,0 +1,167 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 27 Feb 2019 10:10:23 +0100
+Subject: x86/speculation: Consolidate CPU whitelists
+Git-commit: 36ad35131adacc29b328b9c8b6277a8bf0d6fd5d
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+The CPU vulnerability whitelists have some overlap and there are more
+whitelists coming along.
+
+Use the driver_data field in the x86_cpu_id struct to denote the
+whitelisted vulnerabilities and combine all whitelists into one.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/common.c | 103 ++++++++++++++++++++++---------------------
+ 1 file changed, 55 insertions(+), 48 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -898,60 +898,68 @@ static void identify_cpu_without_cpuid(s
+ #endif
+ }
+
+-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY },
+- { X86_VENDOR_CENTAUR, 5 },
+- { X86_VENDOR_INTEL, 5 },
+- { X86_VENDOR_NSC, 5 },
+- { X86_VENDOR_ANY, 4 },
+- {}
+-};
++#define NO_SPECULATION BIT(0)
++#define NO_MELTDOWN BIT(1)
++#define NO_SSB BIT(2)
++#define NO_L1TF BIT(3)
++
++#define VULNWL(_vendor, _family, _model, _whitelist) \
++ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
++
++#define VULNWL_INTEL(model, whitelist) \
++ VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
++
++#define VULNWL_AMD(family, whitelist) \
++ VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
++
++static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
++ VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION),
++ VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION),
++ VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
++ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
++
++ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION),
++ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION),
++ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION),
++ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
++ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
++
++ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF),
++
++ VULNWL_INTEL(CORE_YONAH, NO_SSB),
++
++ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT, NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_L1TF),
++
++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF),
++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF),
++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF),
++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF),
+
+-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+- { X86_VENDOR_AMD },
++ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF),
+ {}
+ };
+
+-/* Only list CPUs which speculate but are non susceptible to SSB */
+-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+- { X86_VENDOR_AMD, 0x12, },
+- { X86_VENDOR_AMD, 0x11, },
+- { X86_VENDOR_AMD, 0x10, },
+- { X86_VENDOR_AMD, 0xf, },
+- {}
+-};
++static bool __init cpu_matches(unsigned long which)
++{
++ const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
+
+-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+- /* in addition to cpu_no_speculation */
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+- {}
+-};
++ return m && !!(m->driver_data & which);
++}
+
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
+- if (x86_match_cpu(cpu_no_speculation))
++ if (cpu_matches(NO_SPECULATION))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+@@ -960,15 +968,14 @@ static void __init cpu_set_bug_bits(stru
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+- if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+- !(ia32_cap & ARCH_CAP_SSB_NO) &&
++ if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (ia32_cap & ARCH_CAP_IBRS_ALL)
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
+- if (x86_match_cpu(cpu_no_meltdown))
++ if (cpu_matches(NO_MELTDOWN))
+ return;
+
+ /* Rogue Data Cache Load? No! */
+@@ -977,7 +984,7 @@ static void __init cpu_set_bug_bits(stru
+
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+- if (x86_match_cpu(cpu_no_l1tf))
++ if (cpu_matches(NO_L1TF))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_L1TF);
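
[Editor's note: the mechanics are easy to model outside the kernel. A simplified user-space sketch of the same idea, with hypothetical table contents, the per-bug flags packed into driver_data, and a single lookup replacing the three per-bug tables:]

    #include <stdio.h>

    #define NO_SPECULATION (1UL << 0)
    #define NO_MELTDOWN    (1UL << 1)
    #define NO_SSB         (1UL << 2)
    #define NO_L1TF        (1UL << 3)

    struct vulnwl {
        int family;
        int model;
        unsigned long driver_data;  /* whitelist flags, as in the patch */
    };

    /* Illustrative excerpt: two Intel family 6 models from the real table. */
    static const struct vulnwl whitelist[] = {
        { 6, 0x37 /* ATOM_SILVERMONT */, NO_SSB | NO_L1TF },
        { 6, 0x5c /* ATOM_GOLDMONT   */, NO_L1TF },
    };

    static unsigned long cpu_matches(int family, int model)
    {
        for (size_t i = 0; i < sizeof(whitelist) / sizeof(whitelist[0]); i++)
            if (whitelist[i].family == family && whitelist[i].model == model)
                return whitelist[i].driver_data;
        return 0;
    }

    int main(void)
    {
        unsigned long w = cpu_matches(6, 0x37);

        printf("Silvermont: NO_SSB=%d NO_L1TF=%d NO_MELTDOWN=%d\n",
               w & NO_SSB ? 1 : 0, w & NO_L1TF ? 1 : 0,
               w & NO_MELTDOWN ? 1 : 0);
        return 0;
    }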
diff --git a/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch b/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
new file mode 100644
index 0000000000..3b673bacec
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
@@ -0,0 +1,148 @@
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 18 Jan 2019 16:50:16 -0800
+Subject: x86/speculation/mds: Add basic bug infrastructure for MDS
+Git-commit: ed5194c2732c8084af9fd159c146ea92bf137128
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Microarchitectural Data Sampling (MDS) is a class of side channel attacks
+on internal buffers in Intel CPUs. The variants are:
+
+ - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126)
+ - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130)
+ - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127)
+
+MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a
+dependent load (store-to-load forwarding) as an optimization. The forward
+can also happen to a faulting or assisting load operation for a different
+memory address, which can be exploited under certain conditions. Store
+buffers are partitioned between Hyper-Threads so cross thread forwarding is
+not possible. But if a thread enters or exits a sleep state the store
+buffer is repartitioned which can expose data from one thread to the other.
+
+MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage
+L1 miss situations and to hold data which is returned or sent in response
+to a memory or I/O operation. Fill buffers can forward data to a load
+operation and also write data to the cache. When the fill buffer is
+deallocated it can retain the stale data of the preceding operations which
+can then be forwarded to a faulting or assisting load operation, which can
+be exploited under certain conditions. Fill buffers are shared between
+Hyper-Threads so cross thread leakage is possible.
+
+MLPDS leaks Load Port Data. Load ports are used to perform load operations
+from memory or I/O. The received data is then forwarded to the register
+file or a subsequent operation. In some implementations the Load Port can
+contain stale data from a previous operation which can be forwarded to
+faulting or assisting loads under certain conditions, which again can be
+exploited eventually. Load ports are shared between Hyper-Threads so cross
+thread leakage is possible.
+
+All variants have the same mitigation for the single CPU thread case (SMT off),
+so the kernel can treat them as one MDS issue.
+
+Add the basic infrastructure to detect if the current CPU is affected by
+MDS.
+
+[ tglx: Rewrote changelog ]
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 ++
+ arch/x86/include/asm/msr-index.h | 5 +++++
+ arch/x86/kernel/cpu/common.c | 25 ++++++++++++++++---------
+ 3 files changed, 23 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -341,6 +341,7 @@
+ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+ #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
++#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
+ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+@@ -378,4 +379,5 @@
+ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+ #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+ #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
++#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -78,6 +78,11 @@
+ * attack, so no Speculative Store Bypass
+ * control required.
+ */
++#define ARCH_CAP_MDS_NO BIT(5) /*
++ * Not susceptible to
++ * Microarchitectural Data
++ * Sampling (MDS) vulnerabilities.
++ */
+
+ #define MSR_IA32_FLUSH_CMD 0x0000010b
+ #define L1D_FLUSH BIT(0) /*
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -902,6 +902,7 @@ static void identify_cpu_without_cpuid(s
+ #define NO_MELTDOWN BIT(1)
+ #define NO_SSB BIT(2)
+ #define NO_L1TF BIT(3)
++#define NO_MDS BIT(4)
+
+ #define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+@@ -918,6 +919,7 @@ static const __initconst struct x86_cpu_
+ VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
+
++ /* Intel Family 6 */
+ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION),
+@@ -934,17 +936,19 @@ static const __initconst struct x86_cpu_
+ VULNWL_INTEL(CORE_YONAH, NO_SSB),
+
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF),
+- VULNWL_INTEL(ATOM_GOLDMONT, NO_L1TF),
+- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_L1TF),
+- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_L1TF),
+-
+- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF),
+- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF),
+- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF),
+- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF),
++
++ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF),
++
++ /* AMD Family 0xf - 0x12 */
++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF),
++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
+ {}
+ };
+
+@@ -975,6 +979,9 @@ static void __init cpu_set_bug_bits(stru
+ if (ia32_cap & ARCH_CAP_IBRS_ALL)
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
++ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO))
++ setup_force_cpu_bug(X86_BUG_MDS);
++
+ if (cpu_matches(NO_MELTDOWN))
+ return;
+
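With ARCH_CAP_MDS_NO defined as bit 5 of MSR_IA32_ARCH_CAPABILITIES (0x10a),
the new detection logic can be cross-checked from user space. A hedged sketch
using the msr driver (assumes the msr module is loaded and root privileges;
this is not part of the patch itself):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t val = 0;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            /* The pread offset selects the MSR number for the msr driver. */
            if (fd < 0 || pread(fd, &val, sizeof(val), 0x10a) != sizeof(val)) {
                    perror("rdmsr");  /* older CPUs do not expose this MSR */
                    return 1;
            }
            close(fd);
            printf("MDS_NO (bit 5): %s\n", (val & (1ULL << 5)) ? "set" : "clear");
            return 0;
    }

On CPUs that report MDS_NO, the code above does not force X86_BUG_MDS.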
diff --git a/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch b/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
new file mode 100644
index 0000000000..b6ebab3b9e
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
@@ -0,0 +1,86 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 1 Mar 2019 20:21:08 +0100
+Subject: x86/speculation/mds: Add BUG_MSBDS_ONLY
+Git-commit: e261f209c3666e842fd645a1e31f001c3a26def9
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+This bug bit is set on CPUs which are only affected by Microarchitectural
+Store Buffer Data Sampling (MSBDS) and not by any other MDS variant.
+
+This is important because the Store Buffers are partitioned between
+Hyper-Threads so cross thread forwarding is not possible. But if a thread
+enters or exits a sleep state the store buffer is repartitioned which can
+expose data from one thread to the other. This transition can be mitigated.
+
+That means that for CPUs which are only affected by MSBDS SMT can be
+enabled, if the CPU is not affected by other SMT sensitive vulnerabilities,
+e.g. L1TF. The XEON PHI variants fall into that category, as do the
+Silvermont/Airmont ATOMs; for them it's not really relevant because they do
+not support SMT, but mark them anyway for completeness' sake.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/common.c | 20 ++++++++++++--------
+ 2 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -379,4 +379,5 @@
+ #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+ #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+ #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
++#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSBDS variant of BUG_MDS */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -903,6 +903,7 @@ static void identify_cpu_without_cpuid(s
+ #define NO_SSB BIT(2)
+ #define NO_L1TF BIT(3)
+ #define NO_MDS BIT(4)
++#define MSBDS_ONLY BIT(5)
+
+ #define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+@@ -926,16 +927,16 @@ static const __initconst struct x86_cpu_
+ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
+
+- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF),
+- VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF),
+- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF),
+- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF),
+- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF),
+- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY),
+
+ VULNWL_INTEL(CORE_YONAH, NO_SSB),
+
+- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF),
++ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY),
+
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
+@@ -979,8 +980,11 @@ static void __init cpu_set_bug_bits(stru
+ if (ia32_cap & ARCH_CAP_IBRS_ALL)
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
+- if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO))
++ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ setup_force_cpu_bug(X86_BUG_MDS);
++ if (cpu_matches(MSBDS_ONLY))
++ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
++ }
+
+ if (cpu_matches(NO_MELTDOWN))
+ return;
diff --git a/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch b/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
new file mode 100644
index 0000000000..d3303c9f25
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
@@ -0,0 +1,85 @@
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Tue, 2 Apr 2019 09:59:33 -0500
+Subject: x86/speculation/mds: Add mds=full,nosmt cmdline option
+Git-repo: tip/tip
+Git-commit: d71eb0ce109a124b0fa714832823b9452f2762cf
+Patch-mainline: Queued in a subsystem tree
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Add the mds=full,nosmt cmdline option. This is like mds=full, but with
+SMT disabled if the CPU is vulnerable.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Acked-by: Jiri Kosina <jkosina@suse.cz>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 6 ++++--
+ Documentation/x86/mds.rst | 4 ++++
+ arch/x86/kernel/cpu/bugs.c | 10 ++++++++++
+ 3 files changed, 18 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -342,6 +342,7 @@ early_param("l1tf", l1tf_cmdline);
+
+ /* Default mitigation for MDS-affected CPUs */
+ static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
++static bool mds_nosmt __ro_after_init = false;
+
+ static const char * const mds_strings[] = {
+ [MDS_MITIGATION_OFF] = "Vulnerable",
+@@ -359,8 +360,13 @@ static void __init mds_select_mitigation
+ if (mds_mitigation == MDS_MITIGATION_FULL) {
+ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ mds_mitigation = MDS_MITIGATION_VMWERV;
++
+ static_branch_enable(&mds_user_clear);
++
++ if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
++ cpu_smt_disable(false);
+ }
++
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+ }
+
+@@ -376,6 +382,10 @@ static int __init mds_cmdline(char *str)
+ mds_mitigation = MDS_MITIGATION_OFF;
+ else if (!strcmp(str, "full"))
+ mds_mitigation = MDS_MITIGATION_FULL;
++ else if (!strcmp(str, "full,nosmt")) {
++ mds_mitigation = MDS_MITIGATION_FULL;
++ mds_nosmt = true;
++ }
+
+ return 0;
+ }
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2221,8 +2221,10 @@
+ This parameter controls the MDS mitigation. The
+ options are:
+
+- full - Enable MDS mitigation on vulnerable CPUs
+- off - Unconditionally disable MDS mitigation
++ full - Enable MDS mitigation on vulnerable CPUs
++ full,nosmt - Enable MDS mitigation and disable
++ SMT on vulnerable CPUs
++ off - Unconditionally disable MDS mitigation
+
+ Not specifying this option is equivalent to
+ mds=full.
+--- a/Documentation/x86/mds.rst
++++ b/Documentation/x86/mds.rst
+@@ -119,6 +119,10 @@ Kernel internal mitigation modes
+ scenarios where the host has the updated microcode but the
+ hypervisor does not expose MD_CLEAR in CPUID. It's a best
+ effort approach without guarantee.
++
++ full,nosmt The same as mds=full, with SMT disabled on vulnerable
++ CPUs. This is the complete mitigation.
++
+ ======= ============================================================
+
+ If the CPU is affected and mds=off is not supplied on the kernel command
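For reference, the new option is consumed at boot like any other early_param,
so it is simply appended to the kernel command line in the bootloader; the
GRUB stanza below only illustrates where it goes and is not repository
content:

    linux /boot/vmlinuz root=/dev/sda1 ro quiet mds=full,nosmt

As the code above shows, SMT is then disabled via cpu_smt_disable() unless the
CPU is only affected by MSBDS, in which case SMT may stay on.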
diff --git a/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch b/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
new file mode 100644
index 0000000000..3d563bb801
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
@@ -0,0 +1,216 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Feb 2019 23:13:06 +0100
+Subject: x86/speculation/mds: Add mds_clear_cpu_buffers()
+Git-commit: 6a9e529272517755904b7afa639f6db59ddb793e
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+The Microarchitectural Data Sampling (MDS) vulnerabilities are mitigated by
+clearing the affected CPU buffers. The mechanism for clearing the buffers
+uses the unused and obsolete VERW instruction in combination with a
+microcode update which triggers a CPU buffer clear when VERW is executed.
+
+Provide an inline function with the assembly magic. The argument of the VERW
+instruction must be a memory operand as documented:
+
+ "MD_CLEAR enumerates that the memory-operand variant of VERW (for
+ example, VERW m16) has been extended to also overwrite buffers affected
+ by MDS. This buffer overwriting functionality is not guaranteed for the
+ register operand variant of VERW."
+
+Documentation also recommends using a writable data segment selector:
+
+ "The buffer overwriting occurs regardless of the result of the VERW
+ permission check, as well as when the selector is null or causes a
+ descriptor load segment violation. However, for lowest latency we
+ recommend using a selector that indicates a valid writable data
+ segment."
+
+Add x86 specific documentation about MDS and the internal workings of the
+mitigation.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/index.rst | 1
+ Documentation/x86/conf.py | 10 +++
+ Documentation/x86/index.rst | 8 ++
+ Documentation/x86/mds.rst | 99 +++++++++++++++++++++++++++++++++++
+ arch/x86/include/asm/nospec-branch.h | 25 ++++++++
+ 5 files changed, 143 insertions(+)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -318,6 +318,31 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
++#include <asm/segment.h>
++
++/**
++ * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
++ *
++ * This uses the otherwise unused and obsolete VERW instruction in
++ * combination with microcode which triggers a CPU buffer flush when the
++ * instruction is executed.
++ */
++static inline void mds_clear_cpu_buffers(void)
++{
++ static const u16 ds = __KERNEL_DS;
++
++ /*
++ * Has to be the memory-operand variant because only that
++ * guarantees the CPU buffer flush functionality according to
++ * documentation. The register-operand variant does not.
++ * Works with any segment selector, but a valid writable
++ * data segment is the fastest variant.
++ *
++ * "cc" clobber is required because VERW modifies ZF.
++ */
++ asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ /*
+--- a/Documentation/index.rst
++++ b/Documentation/index.rst
+@@ -34,6 +34,7 @@ the kernel interface as seen by applicat
+ :maxdepth: 2
+
+ userspace-api/index
++ x86/index
+
+
+ Introduction to kernel development
+--- /dev/null
++++ b/Documentation/x86/conf.py
+@@ -0,0 +1,10 @@
++# -*- coding: utf-8; mode: python -*-
++
++project = "X86 architecture specific documentation"
++
++tags.add("subproject")
++
++latex_documents = [
++ ('index', 'x86.tex', project,
++ 'The kernel development community', 'manual'),
++]
+--- /dev/null
++++ b/Documentation/x86/index.rst
+@@ -0,0 +1,8 @@
++==========================
++x86 architecture specifics
++==========================
++
++.. toctree::
++ :maxdepth: 1
++
++ mds
+--- /dev/null
++++ b/Documentation/x86/mds.rst
+@@ -0,0 +1,99 @@
++Microarchitectural Data Sampling (MDS) mitigation
++=================================================
++
++.. _mds:
++
++Overview
++--------
++
++Microarchitectural Data Sampling (MDS) is a family of side channel attacks
++on internal buffers in Intel CPUs. The variants are:
++
++ - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126)
++ - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130)
++ - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127)
++
++MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a
++dependent load (store-to-load forwarding) as an optimization. The forward
++can also happen to a faulting or assisting load operation for a different
++memory address, which can be exploited under certain conditions. Store
++buffers are partitioned between Hyper-Threads so cross thread forwarding is
++not possible. But if a thread enters or exits a sleep state the store
++buffer is repartitioned which can expose data from one thread to the other.
++
++MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage
++L1 miss situations and to hold data which is returned or sent in response
++to a memory or I/O operation. Fill buffers can forward data to a load
++operation and also write data to the cache. When the fill buffer is
++deallocated it can retain the stale data of the preceding operations which
++can then be forwarded to a faulting or assisting load operation, which can
++be exploited under certain conditions. Fill buffers are shared between
++Hyper-Threads so cross thread leakage is possible.
++
++MLPDS leaks Load Port Data. Load ports are used to perform load operations
++from memory or I/O. The received data is then forwarded to the register
++file or a subsequent operation. In some implementations the Load Port can
++contain stale data from a previous operation which can be forwarded to
++faulting or assisting loads under certain conditions, which again can be
++exploited eventually. Load ports are shared between Hyper-Threads so cross
++thread leakage is possible.
++
++
++Exposure assumptions
++--------------------
++
++It is assumed that attack code resides in user space or in a guest, with one
++exception. The rationale behind this assumption is that the code construct
++needed for exploiting MDS requires:
++
++ - to control the load to trigger a fault or assist
++
++ - to have a disclosure gadget which exposes the speculatively accessed
++ data for consumption through a side channel.
++
++ - to control the pointer through which the disclosure gadget exposes the
++ data
++
++The existence of such a construct in the kernel cannot be excluded with
++100% certainty, but the complexity involved makes it extremely unlikely.
++
++There is one exception, which is untrusted BPF. The functionality of
++untrusted BPF is limited, but it needs to be thoroughly investigated
++whether it can be used to create such a construct.
++
++
++Mitigation strategy
++-------------------
++
++All variants have the same mitigation strategy at least for the single CPU
++thread case (SMT off): Force the CPU to clear the affected buffers.
++
++This is achieved by using the otherwise unused and obsolete VERW
++instruction in combination with a microcode update. The microcode clears
++the affected CPU buffers when the VERW instruction is executed.
++
++For virtualization there are two ways to achieve CPU buffer
++clearing: either via the modified VERW instruction or via the L1D Flush
++command. The latter is issued when L1TF mitigation is enabled so the extra
++VERW can be avoided. If the CPU is not affected by L1TF then VERW needs to
++be issued.
++
++If the VERW instruction with the supplied segment selector argument is
++executed on a CPU without the microcode update there is no side effect
++other than a small number of pointlessly wasted CPU cycles.
++
++This does not protect against cross Hyper-Thread attacks except for MSBDS
++which is only exploitable cross Hyper-thread when one of the Hyper-Threads
++enters a C-state.
++
++The kernel provides a function to invoke the buffer clearing:
++
++ mds_clear_cpu_buffers()
++
++The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
++(idle) transitions.
++
++According to current knowledge additional mitigations inside the kernel
++itself are not required because the necessary gadgets to expose the leaked
++data cannot be controlled in a way which allows exploitation from malicious
++user space or VM guests.
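Since VERW is executable at any privilege level and never faults on its
selector check, the primitive documented above can be exercised outside the
kernel as well. A stand-alone user-space sketch of the same memory-operand
form (illustrative only; without the MD_CLEAR microcode it merely burns a few
cycles and clobbers ZF):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t sel;

            /* Use the current data segment selector as the memory operand,
             * mirroring the kernel's mds_clear_cpu_buffers() above. Without
             * updated microcode this is a no-op apart from clobbering ZF. */
            asm volatile("mov %%ds, %0" : "=r" (sel));
            asm volatile("verw %[sel]" : : [sel] "m" (sel) : "cc");

            puts("verw executed");
            return 0;
    }

Whether the buffer clear actually happens depends entirely on the microcode;
treat this purely as a demonstration of the instruction form.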
diff --git a/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch b/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
new file mode 100644
index 0000000000..0cc8bfbf56
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
@@ -0,0 +1,186 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Feb 2019 22:04:08 +0100
+Subject: x86/speculation/mds: Add mitigation control for MDS
+Git-commit: bc1241700acd82ec69fde98c5763ce51086269f8
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Now that the mitigations are in place, add a command line parameter to
+control the mitigation, a mitigation selector function and an SMT update
+mechanism.
+
+This is the minimal, straightforward initial implementation which just
+provides an always on/off mode. The command line parameter is:
+
+ mds=[full|off]
+
+This is consistent with the existing mitigations for other speculative
+hardware vulnerabilities.
+
+The idle invocation is dynamically updated according to the SMT state of
+the system similar to the dynamic update of the STIBP mitigation. The idle
+mitigation is limited to CPUs which are only affected by MSBDS and not any
+other variant, because the other variants cannot be mitigated on SMT
+enabled systems.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 22 +++++++
+ arch/x86/include/asm/processor.h | 5 +
+ arch/x86/kernel/cpu/bugs.c | 70 ++++++++++++++++++++++++
+ 3 files changed, 97 insertions(+)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -982,4 +982,9 @@ enum l1tf_mitigations {
+
+ extern enum l1tf_mitigations l1tf_mitigation;
+
++enum mds_mitigations {
++ MDS_MITIGATION_OFF,
++ MDS_MITIGATION_FULL,
++};
++
+ #endif /* _ASM_X86_PROCESSOR_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -34,6 +34,7 @@
+ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
+ static void __init l1tf_select_mitigation(void);
++static void __init mds_select_mitigation(void);
+
+ /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+ u64 x86_spec_ctrl_base;
+@@ -105,6 +106,8 @@ void __init check_bugs(void)
+
+ l1tf_select_mitigation();
+
++ mds_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -330,6 +333,50 @@ early_param("l1tf", l1tf_cmdline);
+
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "MDS: " fmt
++
++/* Default mitigation for MDS-affected CPUs */
++static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
++
++static const char * const mds_strings[] = {
++ [MDS_MITIGATION_OFF] = "Vulnerable",
++ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers"
++};
++
++static void __init mds_select_mitigation(void)
++{
++ if (!boot_cpu_has_bug(X86_BUG_MDS)) {
++ mds_mitigation = MDS_MITIGATION_OFF;
++ return;
++ }
++
++ if (mds_mitigation == MDS_MITIGATION_FULL) {
++ if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
++ static_branch_enable(&mds_user_clear);
++ else
++ mds_mitigation = MDS_MITIGATION_OFF;
++ }
++ pr_info("%s\n", mds_strings[mds_mitigation]);
++}
++
++static int __init mds_cmdline(char *str)
++{
++ if (!boot_cpu_has_bug(X86_BUG_MDS))
++ return 0;
++
++ if (!str)
++ return -EINVAL;
++
++ if (!strcmp(str, "off"))
++ mds_mitigation = MDS_MITIGATION_OFF;
++ else if (!strcmp(str, "full"))
++ mds_mitigation = MDS_MITIGATION_FULL;
++
++ return 0;
++}
++early_param("mds", mds_cmdline);
++
++#undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V2 : " fmt
+
+ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+@@ -738,6 +785,26 @@ static void update_indir_branch_cond(voi
+ static_branch_disable(&switch_to_cond_stibp);
+ }
+
++/* Update the static key controlling the MDS CPU buffer clear in idle */
++static void update_mds_branch_idle(void)
++{
++ /*
++ * Enable the idle clearing if SMT is active on CPUs which are
++ * affected only by MSBDS and not any other MDS variant.
++ *
++ * The other variants cannot be mitigated when SMT is enabled, so
++ * clearing the buffers on idle just to prevent the Store Buffer
++ * repartitioning leak would be a window dressing exercise.
++ */
++ if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
++ return;
++
++ if (sched_smt_active())
++ static_branch_enable(&mds_idle_clear);
++ else
++ static_branch_disable(&mds_idle_clear);
++}
++
+ void arch_smt_update(void)
+ {
+ /* Enhanced IBRS implies STIBP. No update required. */
+@@ -758,6 +825,9 @@ void arch_smt_update(void)
+ break;
+ }
+
++ if (mds_mitigation == MDS_MITIGATION_FULL)
++ update_mds_branch_idle();
++
+ mutex_unlock(&spec_ctrl_mutex);
+ }
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2205,6 +2205,28 @@
+ Format: <first>,<last>
+ Specifies range of consoles to be captured by the MDA.
+
++ mds= [X86,INTEL]
++ Control mitigation for the Micro-architectural Data
++ Sampling (MDS) vulnerability.
++
++ Certain CPUs are vulnerable to an exploit against CPU
++ internal buffers which can forward information to a
++ disclosure gadget under certain conditions.
++
++ In vulnerable processors, the speculatively
++ forwarded data can be used in a cache side channel
++ attack, to access data to which the attacker does
++ not have direct access.
++
++ This parameter controls the MDS mitigation. The
++ options are:
++
++ full - Enable MDS mitigation on vulnerable CPUs
++ off - Unconditionally disable MDS mitigation
++
++ Not specifying this option is equivalent to
++ mds=full.
++
+ mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
+ Amount of memory to be used when the kernel is not able
+ to see the whole system memory or for test.
diff --git a/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch b/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
new file mode 100644
index 0000000000..4f0e1f0f07
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
@@ -0,0 +1,131 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 20 Feb 2019 09:40:40 +0100
+Subject: x86/speculation/mds: Add mitigation mode VMWERV
+Git-commit: 22dd8365088b6403630b82423cf906491859b65e
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+In virtualized environments it can happen that the host has the microcode
+update which utilizes the VERW instruction to clear CPU buffers, but the
+hypervisor is not yet updated to expose the X86_FEATURE_MD_CLEAR CPUID bit
+to guests.
+
+Introduce an internal mitigation mode VMWERV which enables the invocation
+of the CPU buffer clearing even if X86_FEATURE_MD_CLEAR is not set. If the
+system has no updated microcode this results in a pointless execution of
+the VERW instruction wasting a few CPU cycles. If the microcode is updated,
+but not exposed to a guest then the CPU buffers will be cleared.
+
+That said: Virtual Machines Will Eventually Receive Vaccine
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/x86/mds.rst | 27 +++++++++++++++++++++++++++
+ arch/x86/include/asm/processor.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++------
+ 3 files changed, 40 insertions(+), 6 deletions(-)
+
+diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
+index 87ce8ac9f36e..3d6f943f1afb 100644
+--- a/Documentation/x86/mds.rst
++++ b/Documentation/x86/mds.rst
+@@ -93,11 +93,38 @@ The kernel provides a function to invoke the buffer clearing:
+ The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
+ (idle) transitions.
+
++As a special quirk to address virtualization scenarios where the host has
++the microcode updated, but the hypervisor does not (yet) expose the
++MD_CLEAR CPUID bit to guests, the kernel issues the VERW instruction in the
++hope that it might actually clear the buffers. The state is reflected
++accordingly.
++
+ According to current knowledge additional mitigations inside the kernel
+ itself are not required because the necessary gadgets to expose the leaked
+ data cannot be controlled in a way which allows exploitation from malicious
+ user space or VM guests.
+
++Kernel internal mitigation modes
++--------------------------------
++
++ ======= ============================================================
++ off Mitigation is disabled. Either the CPU is not affected or
++ mds=off is supplied on the kernel command line
++
++ full Mitigation is enabled. CPU is affected and MD_CLEAR is
++ advertised in CPUID.
++
++ vmwerv Mitigation is enabled. CPU is affected and MD_CLEAR is not
++ advertised in CPUID. That is mainly for virtualization
++ scenarios where the host has the updated microcode but the
++ hypervisor does not expose MD_CLEAR in CPUID. It's a best
++ effort approach without guarantee.
++ ======= ============================================================
++
++If the CPU is affected and mds=off is not supplied on the kernel command
++line then the kernel selects the appropriate mitigation mode depending on
++the availability of the MD_CLEAR CPUID bit.
++
+ Mitigation points
+ -----------------
+
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 1f0295783325..aca1ef8cc79f 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -995,6 +995,7 @@ extern enum l1tf_mitigations l1tf_mitigation;
+ enum mds_mitigations {
+ MDS_MITIGATION_OFF,
+ MDS_MITIGATION_FULL,
++ MDS_MITIGATION_VMWERV,
+ };
+
+ #endif /* _ASM_X86_PROCESSOR_H */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 7ab16a6ed064..95cda38c8785 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -224,7 +224,8 @@ static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL
+
+ static const char * const mds_strings[] = {
+ [MDS_MITIGATION_OFF] = "Vulnerable",
+- [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers"
++ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
++ [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+ };
+
+ static void __init mds_select_mitigation(void)
+@@ -235,10 +236,9 @@ static void __init mds_select_mitigation(void)
+ }
+
+ if (mds_mitigation == MDS_MITIGATION_FULL) {
+- if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+- static_branch_enable(&mds_user_clear);
+- else
+- mds_mitigation = MDS_MITIGATION_OFF;
++ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
++ mds_mitigation = MDS_MITIGATION_VMWERV;
++ static_branch_enable(&mds_user_clear);
+ }
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+ }
+@@ -705,8 +705,14 @@ void arch_smt_update(void)
+ break;
+ }
+
+- if (mds_mitigation == MDS_MITIGATION_FULL)
++ switch (mds_mitigation) {
++ case MDS_MITIGATION_FULL:
++ case MDS_MITIGATION_VMWERV:
+ update_mds_branch_idle();
++ break;
++ case MDS_MITIGATION_OFF:
++ break;
++ }
+
+ mutex_unlock(&spec_ctrl_mutex);
+ }
+
diff --git a/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch b/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
new file mode 100644
index 0000000000..4405b7c895
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
@@ -0,0 +1,62 @@
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Wed, 17 Apr 2019 16:39:02 -0500
+Subject: x86/speculation/mds: Add 'mitigations=' support for MDS
+Git-repo: tip/tip
+Git-commit: 5c14068f87d04adc73ba3f41c2a303d3c3d1fa12
+Patch-mainline: Queued in a subsystem tree
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Add MDS to the new 'mitigations=' cmdline option.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 2 ++
+ arch/x86/kernel/cpu/bugs.c | 5 +++--
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 9aa3543a8723..18cad2b0392a 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2556,6 +2556,7 @@
+ spectre_v2_user=off [X86]
+ spec_store_bypass_disable=off [X86,PPC]
+ l1tf=off [X86]
++ mds=off [X86]
+
+ auto (default)
+ Mitigate all CPU vulnerabilities, but leave SMT
+@@ -2570,6 +2571,7 @@
+ if needed. This is for users who always want to
+ be fully mitigated, even if it means losing SMT.
+ Equivalent to: l1tf=flush,nosmt [X86]
++ mds=full,nosmt [X86]
+
+ mminit_loglevel=
+ [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 3c5c3c3ba734..667c273a66d7 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -233,7 +233,7 @@ static const char * const mds_strings[] = {
+
+ static void __init mds_select_mitigation(void)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_MDS)) {
++ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
+ mds_mitigation = MDS_MITIGATION_OFF;
+ return;
+ }
+@@ -244,7 +244,8 @@ static void __init mds_select_mitigation(void)
+
+ static_branch_enable(&mds_user_clear);
+
+- if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
++ if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
++ (mds_nosmt || cpu_mitigations_auto_nosmt()))
+ cpu_smt_disable(false);
+ }
+
+
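Read together with the documentation hunk, the global switch now maps onto
the MDS-specific option as follows (an illustration of the equivalences
stated above, nothing more):

    mitigations=off          ->  mds=off
    mitigations=auto,nosmt   ->  mds=full,nosmt

while plain mitigations=auto keeps mds=full semantics and leaves SMT enabled.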
diff --git a/patches.arch/x86-speculation-mds-add-smt-warning-message.patch b/patches.arch/x86-speculation-mds-add-smt-warning-message.patch
new file mode 100644
index 0000000000..0ba3c2f544
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-smt-warning-message.patch
@@ -0,0 +1,50 @@
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Tue, 2 Apr 2019 10:00:51 -0500
+Subject: x86/speculation/mds: Add SMT warning message
+Git-commit: 39226ef02bfb43248b7db12a4fdccb39d95318e3
+Git-repo: tip/tip
+Patch-mainline: Queued in a subsystem tree
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+MDS-affected CPUs remain vulnerable when SMT is enabled. Make that clear
+with a one-time printk whenever SMT first gets enabled.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Acked-by: Jiri Kosina <jkosina@suse.cz>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -800,6 +800,9 @@ static void update_indir_branch_cond(voi
+ static_branch_disable(&switch_to_cond_stibp);
+ }
+
++#undef pr_fmt
++#define pr_fmt(fmt) fmt
++
+ /* Update the static key controlling the MDS CPU buffer clear in idle */
+ static void update_mds_branch_idle(void)
+ {
+@@ -820,6 +823,8 @@ static void update_mds_branch_idle(void)
+ static_branch_disable(&mds_idle_clear);
+ }
+
++#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
++
+ void arch_smt_update(void)
+ {
+ /* Enhanced IBRS implies STIBP. No update required. */
+@@ -843,6 +848,8 @@ void arch_smt_update(void)
+ switch (mds_mitigation) {
+ case MDS_MITIGATION_FULL:
+ case MDS_MITIGATION_VMWERV:
++ if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
++ pr_warn_once(MDS_MSG_SMT);
+ update_mds_branch_idle();
+ break;
+ case MDS_MITIGATION_OFF:
diff --git a/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch b/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
new file mode 100644
index 0000000000..2a1c7ad669
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
@@ -0,0 +1,120 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Feb 2019 22:51:43 +0100
+Subject: x86/speculation/mds: Add sysfs reporting for MDS
+Git-commit: 8a4b06d391b0a42a373808979b5028f5c84d9c6a
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Add the sysfs reporting file for MDS. It exposes the vulnerability and
+mitigation state similar to the existing files for the other speculative
+hardware vulnerabilities.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 1
+ arch/x86/kernel/cpu/bugs.c | 25 +++++++++++++++++++++
+ drivers/base/cpu.c | 8 ++++++
+ include/linux/cpu.h | 2 +
+ 4 files changed, 36 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1170,6 +1170,22 @@ static ssize_t l1tf_show_state(char *buf
+ }
+ #endif
+
++static ssize_t mds_show_state(char *buf)
++{
++ if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
++ return sprintf(buf, "%s; SMT Host state unknown\n",
++ mds_strings[mds_mitigation]);
++ }
++
++ if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
++ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
++ sched_smt_active() ? "mitigated" : "disabled");
++ }
++
++ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
++ sched_smt_active() ? "vulnerable" : "disabled");
++}
++
+ static char *stibp_state(void)
+ {
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+@@ -1236,6 +1252,10 @@ static ssize_t cpu_show_common(struct de
+ return l1tf_show_state(buf);
+ break;
+
++
++ case X86_BUG_MDS:
++ return mds_show_state(buf);
++
+ default:
+ break;
+ }
+@@ -1267,4 +1287,9 @@ ssize_t cpu_show_l1tf(struct device *dev
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
+ }
++
++ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
++}
+ #endif
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -380,6 +380,7 @@ What: /sys/devices/system/cpu/vulnerabi
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ /sys/devices/system/cpu/vulnerabilities/l1tf
++ /sys/devices/system/cpu/vulnerabilities/mds
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -533,11 +533,18 @@ ssize_t __weak cpu_show_l1tf(struct devi
+ return sprintf(buf, "Not affected\n");
+ }
+
++ssize_t __weak cpu_show_mds(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+ static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
++static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+@@ -545,6 +552,7 @@ static struct attribute *cpu_root_vulner
+ &dev_attr_spectre_v2.attr,
+ &dev_attr_spec_store_bypass.attr,
+ &dev_attr_l1tf.attr,
++ &dev_attr_mds.attr,
+ NULL
+ };
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -56,6 +56,8 @@ extern ssize_t cpu_show_spec_store_bypas
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_mds(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
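The new file can be consumed by any tool that reads sysfs. A small
stand-alone C sketch (the path comes from the ABI documentation above;
everything else is plain stdio and illustrative):

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mds", "r");

            if (!f) {
                    perror("fopen");  /* kernels without this series lack the file */
                    return 1;
            }
            if (fgets(line, sizeof(line), f))
                    fputs(line, stdout);  /* e.g. "Mitigation: Clear CPU buffers; SMT vulnerable" */
            fclose(f);
            return 0;
    }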
diff --git a/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch b/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
new file mode 100644
index 0000000000..8501ae91ae
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
@@ -0,0 +1,192 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Feb 2019 23:42:51 +0100
+Subject: x86/speculation/mds: Clear CPU buffers on exit to user
+Git-commit: 04dcbdb8057827b043b3c71aa397c4c63e67d086
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Add a static key which controls the invocation of the CPU buffer clear
+mechanism on exit to user space and add the call into
+prepare_exit_to_usermode() and do_nmi() right before actually returning.
+
+Add documentation which kernel to user space transition this covers and
+explain why some corner cases are not mitigated.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/x86/mds.rst | 52 +++++++++++++++++++++++++++++++++++
+ arch/x86/entry/common.c | 3 ++
+ arch/x86/include/asm/nospec-branch.h | 13 ++++++++
+ arch/x86/kernel/cpu/bugs.c | 3 ++
+ arch/x86/kernel/nmi.c | 4 ++
+ arch/x86/kernel/traps.c | 8 +++++
+ 6 files changed, 83 insertions(+)
+
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -30,6 +30,7 @@
+ #include <asm/vdso.h>
+ #include <linux/uaccess.h>
+ #include <asm/cpufeature.h>
++#include <asm/nospec-branch.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/syscalls.h>
+@@ -208,6 +209,8 @@ __visible inline void prepare_exit_to_us
+ #endif
+
+ user_enter_irqoff();
++
++ mds_user_clear_cpu_buffers();
+ }
+
+ #define SYSCALL_EXIT_WORK_FLAGS \
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -318,6 +318,8 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
++DECLARE_STATIC_KEY_FALSE(mds_user_clear);
++
+ #include <asm/segment.h>
+
+ /**
+@@ -343,6 +345,17 @@ static inline void mds_clear_cpu_buffers
+ asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+ }
+
++/**
++ * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
++ *
++ * Clear CPU buffers if the corresponding static key is enabled
++ */
++static inline void mds_user_clear_cpu_buffers(void)
++{
++ if (static_branch_likely(&mds_user_clear))
++ mds_clear_cpu_buffers();
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ /*
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -60,6 +60,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_i
+ /* Control unconditional IBPB in switch_mm() */
+ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
++/* Control MDS CPU buffer clear before returning to user space */
++DEFINE_STATIC_KEY_FALSE(mds_user_clear);
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -34,6 +34,7 @@
+ #include <asm/x86_init.h>
+ #include <asm/reboot.h>
+ #include <asm/cache.h>
++#include <asm/nospec-branch.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/nmi.h>
+@@ -533,6 +534,9 @@ nmi_restart:
+ write_cr2(this_cpu_read(nmi_cr2));
+ if (this_cpu_dec_return(nmi_state))
+ goto nmi_restart;
++
++ if (user_mode(regs))
++ mds_user_clear_cpu_buffers();
+ }
+ NOKPROBE_SYMBOL(do_nmi);
+
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -59,6 +59,7 @@
+ #include <asm/alternative.h>
+ #include <asm/fpu/xstate.h>
+ #include <asm/trace/mpx.h>
++#include <asm/nospec-branch.h>
+ #include <asm/mpx.h>
+ #include <asm/vm86.h>
+
+@@ -393,6 +394,13 @@ dotraplinkage void do_double_fault(struc
+ regs->ip = (unsigned long)general_protection;
+ regs->sp = (unsigned long)&gpregs->orig_ax;
+
++ /*
++ * This situation can be triggered by userspace via
++ * modify_ldt(2) and the return does not take the regular
++ * user space exit, so a CPU buffer clear is required when
++ * MDS mitigation is enabled.
++ */
++ mds_user_clear_cpu_buffers();
+ return;
+ }
+ #endif
+--- a/Documentation/x86/mds.rst
++++ b/Documentation/x86/mds.rst
+@@ -97,3 +97,55 @@ According to current knowledge additiona
+ itself are not required because the necessary gadgets to expose the leaked
+ data cannot be controlled in a way which allows exploitation from malicious
+ user space or VM guests.
++
++Mitigation points
++-----------------
++
++1. Return to user space
++^^^^^^^^^^^^^^^^^^^^^^^
++
++ When transitioning from kernel to user space the CPU buffers are flushed
++ on affected CPUs when the mitigation is not disabled on the kernel
++ command line. The mitigation is enabled through the static key
++ mds_user_clear.
++
++ The mitigation is invoked in prepare_exit_to_usermode() which covers
++ most of the kernel to user space transitions. There are a few exceptions
++ which are not invoking prepare_exit_to_usermode() on return to user
++ space. These exceptions use the paranoid exit code.
++
++ - Non Maskable Interrupt (NMI):
++
++ Access to sensitive data like keys or credentials in the NMI context is
++ mostly theoretical: the CPU can do prefetching or execute a
++ misspeculated code path and thereby fetch data which might end up
++ leaking through a buffer.
++
++ But for mounting other attacks the kernel stack address of the task is
++ already valuable information. So in full mitigation mode, the NMI is
++ mitigated on the return from do_nmi() to provide almost complete
++ coverage.
++
++ - Double fault (#DF):
++
++ A double fault is usually fatal, but the ESPFIX workaround, which can
++ be triggered from user space through modify_ldt(2), is a recoverable
++ double fault. #DF uses the paranoid exit path, so explicit mitigation
++ in the double fault handler is required.
++
++ - Machine Check Exception (#MC):
++
++ Another corner case is a #MC which hits between the CPU buffer clear
++ invocation and the actual return to user. As this still is in kernel
++ space it takes the paranoid exit path which does not clear the CPU
++ buffers. So the #MC handler repopulates the buffers to some
++ extent. Machine checks are not reliably controllable and the window is
++ extremely small, so mitigation would just tick a checkbox that this
++ theoretical corner case is covered. To keep the amount of special
++ cases small, ignore #MC.
++
++ - Debug Exception (#DB):
++
++ This takes the paranoid exit path only when the INT1 breakpoint is in
++ kernel space. #DB on a user space address takes the regular exit path,
++ so no extra mitigation required.
diff --git a/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch b/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
new file mode 100644
index 0000000000..e55065d925
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
@@ -0,0 +1,224 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Feb 2019 23:04:01 +0100
+Subject: x86/speculation/mds: Conditionally clear CPU buffers on idle entry
+Git-commit: 07f07f55a29cb705e221eda7894dd67ab81ef343
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Add a static key which controls the invocation of the CPU buffer clear
+mechanism on idle entry. This is independent of other MDS mitigations
+because the idle entry invocation to mitigate the potential leakage due to
+store buffer repartitioning is only necessary on SMT systems.
+
+Add the actual invocations to the different halt/mwait variants which
+covers all usage sites. mwaitx is not patched as it's not available on
+Intel CPUs.
+
+The buffer clear is only invoked before entering the C-State to prevent
+stale data from the idling CPU from being spilled to the Hyper-Thread
+sibling after the Store buffer has been repartitioned and all entries are
+available to the non-idle sibling.
+
+When coming out of idle the store buffer is partitioned again so each
+sibling has half of it available. Now the CPU which returned from idle could be
+speculatively exposed to contents of the sibling, but the buffers are
+flushed either on exit to user space or on VMENTER.
+
+When later on conditional buffer clearing is implemented on top of this,
+then there is no action required either because before returning to user
+space the context switch will set the condition flag which causes a flush
+on the return to user path.
+
+Note that the buffer clearing on idle is only sensible on CPUs which are
+solely affected by MSBDS and not any other variant of MDS because the other
+MDS variants cannot be mitigated when SMT is enabled, so the buffer
+clearing on idle would be a window dressing exercise.
+
+This intentionally does not handle the case in the acpi/processor_idle
+driver which uses the legacy IO port interface for C-State transitions for
+two reasons:
+
+ - The acpi/processor_idle driver was replaced by the intel_idle driver
+ almost a decade ago. Anything Nehalem upwards supports it and defaults
+ to that new driver.
+
+ - The legacy IO port interface is likely to be used on older and therefore
+ unaffected CPUs or on systems which do not receive microcode updates
+ anymore, so there is no point in adding that.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/x86/mds.rst | 42 ++++++++++++++++++++++++++++++++++++
+ arch/x86/include/asm/irqflags.h | 4 ++++
+ arch/x86/include/asm/mwait.h | 7 ++++++
+ arch/x86/include/asm/nospec-branch.h | 12 +++++++++++
+ arch/x86/kernel/cpu/bugs.c | 3 +++
+ 5 files changed, 68 insertions(+)
+
+diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
+index 54d935bf283b..87ce8ac9f36e 100644
+--- a/Documentation/x86/mds.rst
++++ b/Documentation/x86/mds.rst
+@@ -149,3 +149,45 @@ Mitigation points
+ This takes the paranoid exit path only when the INT1 breakpoint is in
+ kernel space. #DB on a user space address takes the regular exit path,
+ so no extra mitigation required.
++
++
++2. C-State transition
++^^^^^^^^^^^^^^^^^^^^^
++
++ When a CPU goes idle and enters a C-State the CPU buffers need to be
++ cleared on affected CPUs when SMT is active. This addresses the
++ repartitioning of the store buffer when one of the Hyper-Threads enters
++ a C-State.
++
++ When SMT is inactive, i.e. either the CPU does not support it or all
++ sibling threads are offline, CPU buffer clearing is not required.
++
++ The idle clearing is enabled on CPUs which are only affected by MSBDS
++ and not by any other MDS variant. The other MDS variants cannot be
++ protected against cross Hyper-Thread attacks because the Fill Buffer and
++ the Load Ports are shared. So on CPUs affected by other variants, the
++ idle clearing would be a window dressing exercise and is therefore not
++ activated.
++
++ The invocation is controlled by the static key mds_idle_clear which is
++ switched depending on the chosen mitigation mode and the SMT state of
++ the system.
++
++ The buffer clear is only invoked before entering the C-State to prevent
++ stale data from the idling CPU from spilling to the Hyper-Thread
++ sibling after the store buffer has been repartitioned and all entries
++ are available to the non-idle sibling.
++
++ When coming out of idle the store buffer is partitioned again so each
++ sibling has half of it available. The CPU coming back from idle could
++ then be speculatively exposed to contents of the sibling. The buffers are
++ flushed either on exit to user space or on VMENTER so malicious code
++ in user space or the guest cannot speculatively access them.
++
++ The mitigation is hooked into all variants of halt()/mwait(), but does
++ not cover the legacy ACPI IO-Port mechanism because the ACPI idle driver
++ has been superseded by the intel_idle driver around 2010 and is
++ preferred on all affected CPUs which are expected to gain the MD_CLEAR
++ functionality in microcode. Aside from that, the IO-Port mechanism is a
++ legacy interface which is only used on older systems which are either
++ not affected or do not receive microcode updates anymore.
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index 058e40fed167..8a0e56e1dcc9 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -6,6 +6,8 @@
+
+ #ifndef __ASSEMBLY__
+
++#include <asm/nospec-branch.h>
++
+ /* Provide __cpuidle; we can't safely include <linux/cpu.h> */
+ #define __cpuidle __attribute__((__section__(".cpuidle.text")))
+
+@@ -54,11 +56,13 @@ static inline void native_irq_enable(void)
+
+ static inline __cpuidle void native_safe_halt(void)
+ {
++ mds_idle_clear_cpu_buffers();
+ asm volatile("sti; hlt": : :"memory");
+ }
+
+ static inline __cpuidle void native_halt(void)
+ {
++ mds_idle_clear_cpu_buffers();
+ asm volatile("hlt": : :"memory");
+ }
+
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index 39a2fb29378a..eb0f80ce8524 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -6,6 +6,7 @@
+ #include <linux/sched/idle.h>
+
+ #include <asm/cpufeature.h>
++#include <asm/nospec-branch.h>
+
+ #define MWAIT_SUBSTATE_MASK 0xf
+ #define MWAIT_CSTATE_MASK 0xf
+@@ -40,6 +41,8 @@ static inline void __monitorx(const void *eax, unsigned long ecx,
+
+ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ {
++ mds_idle_clear_cpu_buffers();
++
+ /* "mwait %eax, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
+@@ -74,6 +77,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
+ unsigned long ecx)
+ {
++ /* No MDS buffer clear as this is AMD/HYGON only */
++
+ /* "mwaitx %eax, %ebx, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xfb;"
+ :: "a" (eax), "b" (ebx), "c" (ecx));
+@@ -81,6 +86,8 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
+
+ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ {
++ mds_idle_clear_cpu_buffers();
++
+ trace_hardirqs_on();
+ /* "mwait %eax, %ecx;" */
+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 65b747286d96..4e970390110f 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -319,6 +319,7 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+ DECLARE_STATIC_KEY_FALSE(mds_user_clear);
++DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+
+ #include <asm/segment.h>
+
+@@ -356,6 +357,17 @@ static inline void mds_user_clear_cpu_buffers(void)
+ mds_clear_cpu_buffers();
+ }
+
++/**
++ * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
++ *
++ * Clear CPU buffers if the corresponding static key is enabled
++ */
++static inline void mds_idle_clear_cpu_buffers(void)
++{
++ if (static_branch_likely(&mds_idle_clear))
++ mds_clear_cpu_buffers();
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ /*
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 29ed8e8dfee2..916995167301 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -66,6 +66,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+ /* Control MDS CPU buffer clear before returning to user space */
+ DEFINE_STATIC_KEY_FALSE(mds_user_clear);
+ EXPORT_SYMBOL_GPL(mds_user_clear);
++/* Control MDS CPU buffer clear before idling (halt, mwait) */
++DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
++EXPORT_SYMBOL_GPL(mds_idle_clear);
+
+ void __init check_bugs(void)
+ {
+
diff --git a/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch b/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
new file mode 100644
index 0000000000..c9762dddd2
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
@@ -0,0 +1,49 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 12 Apr 2019 17:50:58 -0400
+Subject: x86/speculation/mds: Print SMT vulnerable on MSBDS with mitigations off
+Git-repo: tip/tip
+Git-commit: e2c3c94788b08891dcf3dbe608f9880523ecd71b
+Patch-mainline: Queued in a subsystem tree
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+This code is only for CPUs which are affected by MSBDS, but are *not*
+affected by the other two MDS issues.
+
+For such CPUs, enabling the mds_idle_clear mitigation is enough to
+mitigate SMT.
+
+However, if the user boots with 'mds=off' and still has SMT enabled, we
+should not report that SMT is mitigated:
+
+$ cat /sys/devices/system/cpu/vulnerabilities/mds
+Vulnerable; SMT mitigated
+
+But rather:
+Vulnerable; SMT vulnerable
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20190412215118.294906495@localhost.localdomain
+
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 0642505dda69..6b8a55c7cebc 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1204,7 +1204,8 @@ static ssize_t mds_show_state(char *buf)
+
+ if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+- sched_smt_active() ? "mitigated" : "disabled");
++ (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
++ sched_smt_active() ? "mitigated" : "disabled"));
+ }
+
+ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+
diff --git a/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch b/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
new file mode 100644
index 0000000000..cfcda3d0e4
--- /dev/null
+++ b/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
@@ -0,0 +1,45 @@
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Tue, 2 Apr 2019 10:00:14 -0500
+Subject: x86/speculation: Move arch_smt_update() call to after mitigation decisions
+Git-repo: tip/tip
+Git-commit: 7c3658b20194a5b3209a143f63bc9c643c6a3ae2
+Patch-mainline: Queued in a subsystem tree
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+arch_smt_update() now has a dependency on both Spectre v2 and MDS
+mitigations. Move its initial call to after all the mitigation decisions
+have been made.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Acked-by: Jiri Kosina <jkosina@suse.cz>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 9f252082a83b..3f934ffef8cf 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -111,6 +111,8 @@ void __init check_bugs(void)
+
+ mds_select_mitigation();
+
++ arch_smt_update();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -638,9 +640,6 @@ static void __init spectre_v2_select_mitigation(void)
+
+ /* Set up IBPB and STIBP depending on the general spectre V2 command */
+ spectre_v2_user_select_mitigation(cmd);
+-
+- /* Enable STIBP if appropriate */
+- arch_smt_update();
+ }
+
+ static void update_stibp_msr(void * __unused)
+
diff --git a/patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch b/patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch
new file mode 100644
index 0000000000..2ca003db81
--- /dev/null
+++ b/patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch
@@ -0,0 +1,84 @@
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+Date: Tue, 22 May 2018 11:05:39 +0200
+Subject: x86/speculation: Simplify the CPU bug detection logic
+Git-commit: 8ecc4979b1bd9c94168e6fc92960033b7a951336
+Patch-mainline: v4.17-rc7
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Only CPUs which speculate can speculate. Therefore, it seems prudent
+to test for cpu_no_speculation first and only then determine whether
+a specific speculating CPU is susceptible to store bypass speculation.
+This is underlined by the fact that all CPUs currently listed in
+cpu_no_speculation were present in cpu_no_spec_store_bypass as well.
+
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: bp@suse.de
+Cc: konrad.wilk@oracle.com
+Link: https://lkml.kernel.org/r/20180522090539.GA24668@light.dominikbrodowski.net
+
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/common.c | 22 +++++++---------------
+ 1 file changed, 7 insertions(+), 15 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 78decc3e3067..38276f58d3bf 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -942,12 +942,8 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+ {}
+ };
+
++/* Only list CPUs which speculate but are not susceptible to SSB */
+ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+@@ -955,14 +951,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+- { X86_VENDOR_CENTAUR, 5, },
+- { X86_VENDOR_INTEL, 5, },
+- { X86_VENDOR_NSC, 5, },
+ { X86_VENDOR_AMD, 0x12, },
+ { X86_VENDOR_AMD, 0x11, },
+ { X86_VENDOR_AMD, 0x10, },
+ { X86_VENDOR_AMD, 0xf, },
+- { X86_VENDOR_ANY, 4, },
+ {}
+ };
+
+@@ -970,6 +962,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
++ if (x86_match_cpu(cpu_no_speculation))
++ return;
++
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
++
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+@@ -977,12 +975,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ !(ia32_cap & ARCH_CAP_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+- if (x86_match_cpu(cpu_no_speculation))
+- return;
+-
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+-
+ if (x86_match_cpu(cpu_no_meltdown))
+ return;
+
+
diff --git a/patches.arch/x86-speculation-support-enhanced-ibrs-on-future-cpus.patch b/patches.arch/x86-speculation-support-enhanced-ibrs-on-future-cpus.patch
index e0333fec19..6a78531142 100644
--- a/patches.arch/x86-speculation-support-enhanced-ibrs-on-future-cpus.patch
+++ b/patches.arch/x86-speculation-support-enhanced-ibrs-on-future-cpus.patch
@@ -95,7 +95,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
};
#undef pr_fmt
-@@ -347,6 +348,13 @@ static void __init spectre_v2_select_mit
+@@ -348,6 +349,13 @@ static void __init spectre_v2_select_mit
case SPECTRE_V2_CMD_FORCE:
case SPECTRE_V2_CMD_AUTO:
@@ -109,7 +109,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
if (IS_ENABLED(CONFIG_RETPOLINE))
goto retpoline_auto;
break;
-@@ -384,6 +392,7 @@ retpoline_auto:
+@@ -385,6 +393,7 @@ retpoline_auto:
setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
}
@@ -117,7 +117,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
-@@ -406,9 +415,16 @@ retpoline_auto:
+@@ -407,9 +416,16 @@ retpoline_auto:
/*
* Retpoline means the kernel is safe because it has no indirect
@@ -138,9 +138,9 @@ Acked-by: Borislav Petkov <bp@suse.de>
}
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
-@@ -957,6 +957,9 @@ static void __init cpu_set_bug_bits(stru
- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+@@ -956,6 +956,9 @@ static void __init cpu_set_bug_bits(stru
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+ if (ia32_cap & ARCH_CAP_IBRS_ALL)
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
diff --git a/patches.drivers/iommu-vt-d-don-t-request-page-request-irq-under-dmar_global_lock b/patches.drivers/iommu-vt-d-don-t-request-page-request-irq-under-dmar_global_lock
new file mode 100644
index 0000000000..02ec82e57c
--- /dev/null
+++ b/patches.drivers/iommu-vt-d-don-t-request-page-request-irq-under-dmar_global_lock
@@ -0,0 +1,101 @@
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Fri, 19 Apr 2019 14:43:29 +0800
+Subject: iommu/vt-d: Don't request page request irq under dmar_global_lock
+Git-commit: a7755c3cfa5df755e39447b08c28203e011fb98c
+References: bsc#1135006
+Patch-mainline: v5.2-rc1
+
+Requesting the page request irq under dmar_global_lock could cause a
+potential lock race condition (caught by lockdep).
+
+[ 4.100055] ======================================================
+[ 4.100063] WARNING: possible circular locking dependency detected
+[ 4.100072] 5.1.0-rc4+ #2169 Not tainted
+[ 4.100078] ------------------------------------------------------
+[ 4.100086] swapper/0/1 is trying to acquire lock:
+[ 4.100094] 000000007dcbe3c3 (dmar_lock){+.+.}, at: dmar_alloc_hwirq+0x35/0x140
+[ 4.100112] but task is already holding lock:
+[ 4.100120] 0000000060bbe946 (dmar_global_lock){++++}, at: intel_iommu_init+0x191/0x1438
+[ 4.100136] which lock already depends on the new lock.
+[ 4.100146] the existing dependency chain (in reverse order) is:
+[ 4.100155]
+ -> #2 (dmar_global_lock){++++}:
+[ 4.100169] down_read+0x44/0xa0
+[ 4.100178] intel_irq_remapping_alloc+0xb2/0x7b0
+[ 4.100186] mp_irqdomain_alloc+0x9e/0x2e0
+[ 4.100195] __irq_domain_alloc_irqs+0x131/0x330
+[ 4.100203] alloc_isa_irq_from_domain.isra.4+0x9a/0xd0
+[ 4.100212] mp_map_pin_to_irq+0x244/0x310
+[ 4.100221] setup_IO_APIC+0x757/0x7ed
+[ 4.100229] x86_late_time_init+0x17/0x1c
+[ 4.100238] start_kernel+0x425/0x4e3
+[ 4.100247] secondary_startup_64+0xa4/0xb0
+[ 4.100254]
+ -> #1 (irq_domain_mutex){+.+.}:
+[ 4.100265] __mutex_lock+0x7f/0x9d0
+[ 4.100273] __irq_domain_add+0x195/0x2b0
+[ 4.100280] irq_domain_create_hierarchy+0x3d/0x40
+[ 4.100289] msi_create_irq_domain+0x32/0x110
+[ 4.100297] dmar_alloc_hwirq+0x111/0x140
+[ 4.100305] dmar_set_interrupt.part.14+0x1a/0x70
+[ 4.100314] enable_drhd_fault_handling+0x2c/0x6c
+[ 4.100323] apic_bsp_setup+0x75/0x7a
+[ 4.100330] x86_late_time_init+0x17/0x1c
+[ 4.100338] start_kernel+0x425/0x4e3
+[ 4.100346] secondary_startup_64+0xa4/0xb0
+[ 4.100352]
+ -> #0 (dmar_lock){+.+.}:
+[ 4.100364] lock_acquire+0xb4/0x1c0
+[ 4.100372] __mutex_lock+0x7f/0x9d0
+[ 4.100379] dmar_alloc_hwirq+0x35/0x140
+[ 4.100389] intel_svm_enable_prq+0x61/0x180
+[ 4.100397] intel_iommu_init+0x1128/0x1438
+[ 4.100406] pci_iommu_init+0x16/0x3f
+[ 4.100414] do_one_initcall+0x5d/0x2be
+[ 4.100422] kernel_init_freeable+0x1f0/0x27c
+[ 4.100431] kernel_init+0xa/0x110
+[ 4.100438] ret_from_fork+0x3a/0x50
+[ 4.100444]
+ other info that might help us debug this:
+
+[ 4.100454] Chain exists of:
+ dmar_lock --> irq_domain_mutex --> dmar_global_lock
+[ 4.100469] Possible unsafe locking scenario:
+
+[ 4.100476] CPU0 CPU1
+[ 4.100483] ---- ----
+[ 4.100488] lock(dmar_global_lock);
+[ 4.100495] lock(irq_domain_mutex);
+[ 4.100503] lock(dmar_global_lock);
+[ 4.100512] lock(dmar_lock);
+[ 4.100518]
+ *** DEADLOCK ***
+
+Cc: Ashok Raj <ashok.raj@intel.com>
+Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Cc: Kevin Tian <kevin.tian@intel.com>
+Reported-by: Dave Jiang <dave.jiang@intel.com>
+Fixes: a222a7f0bb6c9 ("iommu/vt-d: Implement page request handling")
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/intel-iommu.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3435,7 +3435,13 @@ domains_done:
+
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
++ /*
++ * Calling dmar_alloc_hwirq() with dmar_global_lock held
++ * could cause a lock race condition.
++ */
++ up_write(&dmar_global_lock);
+ ret = intel_svm_enable_prq(iommu);
++ down_write(&dmar_global_lock);
+ if (ret)
+ goto free_iommu;
+ }
+
diff --git a/patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu b/patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu
new file mode 100644
index 0000000000..4ede87ac04
--- /dev/null
+++ b/patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu
@@ -0,0 +1,42 @@
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Thu, 2 May 2019 09:34:26 +0800
+Subject: iommu/vt-d: Make kernel parameter igfx_off work with vIOMMU
+Git-commit: 5daab58043ee2bca861068e2595564828f3bc663
+References: bsc#1135007
+Patch-mainline: v5.2-rc1
+
+The kernel parameter igfx_off is used to disable DMA
+remapping for the Intel integrated graphics device. It
+was designed for bare metal cases where a dedicated
+IOMMU is used for graphics. This doesn't apply to the
+virtual IOMMU case where an include-all IOMMU is used.
+This patch makes the kernel parameter work with a
+virtual IOMMU as well.
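+
+For reference, the option is passed on the kernel command line as part of
+the intel_iommu= parameter, e.g. (illustrative):
+
+	intel_iommu=on,igfx_off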
+
+Cc: Ashok Raj <ashok.raj@intel.com>
+Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Suggested-by: Kevin Tian <kevin.tian@intel.com>
+Fixes: c0771df8d5297 ("intel-iommu: Export a flag indicating that the IOMMU is used for iGFX.")
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Tested-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/intel-iommu.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3351,9 +3351,12 @@ static int __init init_dmars(void)
+ iommu_identity_mapping |= IDENTMAP_ALL;
+
+ #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
+- iommu_identity_mapping |= IDENTMAP_GFX;
++ dmar_map_gfx = 0;
+ #endif
+
++ if (!dmar_map_gfx)
++ iommu_identity_mapping |= IDENTMAP_GFX;
++
+ check_tylersburg_isoch();
+
+ if (iommu_identity_mapping) {
+
diff --git a/patches.drivers/iommu-vt-d-set-intel_iommu_gfx_mapped-correctly b/patches.drivers/iommu-vt-d-set-intel_iommu_gfx_mapped-correctly
new file mode 100644
index 0000000000..f84fdbca56
--- /dev/null
+++ b/patches.drivers/iommu-vt-d-set-intel_iommu_gfx_mapped-correctly
@@ -0,0 +1,49 @@
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Thu, 2 May 2019 09:34:25 +0800
+Subject: iommu/vt-d: Set intel_iommu_gfx_mapped correctly
+Git-commit: cf1ec4539a50bdfe688caad4615ca47646884316
+References: bsc#1135008
+Patch-mainline: v5.2-rc1
+
+The intel_iommu_gfx_mapped flag is exported by the Intel
+IOMMU driver to indicate whether an IOMMU is used for the
+graphics device. In a virtualized IOMMU environment (e.g.
+QEMU), an include-all IOMMU is used for the graphics
+device, yet this flag is found to be clear even when the
+IOMMU is in use.
+
+Cc: Ashok Raj <ashok.raj@intel.com>
+Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Cc: Kevin Tian <kevin.tian@intel.com>
+Reported-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Fixes: c0771df8d5297 ("intel-iommu: Export a flag indicating that the IOMMU is used for iGFX.")
+Suggested-by: Kevin Tian <kevin.tian@intel.com>
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/intel-iommu.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4131,9 +4131,7 @@ static void __init init_no_remapping_dev
+
+ /* This IOMMU has *only* gfx devices. Either bypass it or
+ set the gfx_mapped flag, as appropriate */
+- if (dmar_map_gfx) {
+- intel_iommu_gfx_mapped = 1;
+- } else {
++ if (!dmar_map_gfx) {
+ drhd->ignored = 1;
+ for_each_active_dev_scope(drhd->devices,
+ drhd->devices_cnt, i, dev)
+@@ -4933,6 +4931,9 @@ int __init intel_iommu_init(void)
+ goto out_free_reserved_range;
+ }
+
++ if (dmar_map_gfx)
++ intel_iommu_gfx_mapped = 1;
++
+ init_no_remapping_devices();
+
+ ret = init_dmars();
+
diff --git a/patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch b/patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch
new file mode 100644
index 0000000000..f91883fe2c
--- /dev/null
+++ b/patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch
@@ -0,0 +1,160 @@
+From 62740e97881c78b45a117a358a866fb32975def6 Mon Sep 17 00:00:00 2001
+From: Thomas Falcon <tlfalcon@linux.ibm.com>
+Date: Thu, 9 May 2019 23:13:43 -0500
+Subject: [PATCH] net/ibmvnic: Update MAC address settings after adapter reset
+
+References: bsc#1134760
+Patch-mainline: v5.2-rc1
+Git-commit: 62740e97881c78b45a117a358a866fb32975def6
+
+It was discovered in testing that the underlying hardware MAC
+address will revert to initial settings following a device reset,
+but the driver fails to resend the current OS MAC settings. This
+oversight can result in dropped packets should the scenario occur.
+Fix this by informing the hardware of the current MAC address settings
+following any adapter initialization or reset.
+
+Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 53 ++++++++++++++++--------------
+ drivers/net/ethernet/ibm/ibmvnic.h | 2 --
+ 2 files changed, 28 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index b398d6c94dbd..2be3bcd0192f 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -118,7 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
+ static int ibmvnic_init(struct ibmvnic_adapter *);
+ static int ibmvnic_reset_init(struct ibmvnic_adapter *);
+ static void release_crq_queue(struct ibmvnic_adapter *);
+-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
++static int __ibmvnic_set_mac(struct net_device *, u8 *);
+ static int init_crq_queue(struct ibmvnic_adapter *adapter);
+ static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
+
+@@ -849,11 +849,7 @@ static int ibmvnic_login(struct net_device *netdev)
+ }
+ } while (retry);
+
+- /* handle pending MAC address changes after successful login */
+- if (adapter->mac_change_pending) {
+- __ibmvnic_set_mac(netdev, &adapter->desired.mac);
+- adapter->mac_change_pending = false;
+- }
++ __ibmvnic_set_mac(netdev, adapter->mac_addr);
+
+ return 0;
+ }
+@@ -1686,28 +1682,40 @@ static void ibmvnic_set_multi(struct net_device *netdev)
+ }
+ }
+
+-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
++static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
+ {
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+- struct sockaddr *addr = p;
+ union ibmvnic_crq crq;
+ int rc;
+
+- if (!is_valid_ether_addr(addr->sa_data))
+- return -EADDRNOTAVAIL;
++ if (!is_valid_ether_addr(dev_addr)) {
++ rc = -EADDRNOTAVAIL;
++ goto err;
++ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
+ crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
+- ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
++ ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
+
+ init_completion(&adapter->fw_done);
+ rc = ibmvnic_send_crq(adapter, &crq);
+- if (rc)
+- return rc;
++ if (rc) {
++ rc = -EIO;
++ goto err;
++ }
++
+ wait_for_completion(&adapter->fw_done);
+ /* netdev->dev_addr is changed in handle_change_mac_rsp function */
+- return adapter->fw_done_rc ? -EIO : 0;
++ if (adapter->fw_done_rc) {
++ rc = -EIO;
++ goto err;
++ }
++
++ return 0;
++err:
++ ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
++ return rc;
+ }
+
+ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+@@ -1716,13 +1724,10 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+ struct sockaddr *addr = p;
+ int rc;
+
+- if (adapter->state == VNIC_PROBED) {
+- memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
+- adapter->mac_change_pending = true;
+- return 0;
+- }
+-
+- rc = __ibmvnic_set_mac(netdev, addr);
++ rc = 0;
++ ether_addr_copy(adapter->mac_addr, addr->sa_data);
++ if (adapter->state != VNIC_PROBED)
++ rc = __ibmvnic_set_mac(netdev, addr->sa_data);
+
+ return rc;
+ }
+@@ -3937,8 +3942,8 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
+ dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
+ goto out;
+ }
+- memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
+- ETH_ALEN);
++ ether_addr_copy(netdev->dev_addr,
++ &crq->change_mac_addr_rsp.mac_addr[0]);
+ out:
+ complete(&adapter->fw_done);
+ return rc;
+@@ -4852,8 +4857,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ init_completion(&adapter->init_done);
+ adapter->resetting = false;
+
+- adapter->mac_change_pending = false;
+-
+ do {
+ rc = init_crq_queue(adapter);
+ if (rc) {
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index cffdac372a33..dcf2eb6d9290 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -969,7 +969,6 @@ struct ibmvnic_tunables {
+ u64 rx_entries;
+ u64 tx_entries;
+ u64 mtu;
+- struct sockaddr mac;
+ };
+
+ struct ibmvnic_adapter {
+@@ -1091,7 +1090,6 @@ struct ibmvnic_adapter {
+ bool resetting;
+ bool napi_enabled, from_passive_init;
+
+- bool mac_change_pending;
+ bool failover_pending;
+ bool force_reset_recovery;
+
+--
+2.20.1
+
diff --git a/patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch b/patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch
new file mode 100644
index 0000000000..5acb3e95be
--- /dev/null
+++ b/patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch
@@ -0,0 +1,66 @@
+From 0655f9943df2f2d71f406fd77b51d05548134fc2 Mon Sep 17 00:00:00 2001
+From: Thomas Falcon <tlfalcon@linux.ibm.com>
+Date: Thu, 9 May 2019 23:13:44 -0500
+Subject: [PATCH] net/ibmvnic: Update carrier state after link state change
+
+References: bsc#1135100
+Patch-mainline: v5.2-rc1
+Git-commit: 0655f9943df2f2d71f406fd77b51d05548134fc2
+
+Only set the device carrier state to on after receiving an up link
+state indication from the underlying adapter. Likewise, if a down
+link indication is received, update the carrier state accordingly.
+This fix ensures that accurate carrier state is reported by the driver
+following a link state update by the underlying adapter.
+
+Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 2be3bcd0192f..3dcd9c3d8781 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1111,7 +1111,6 @@ static int ibmvnic_open(struct net_device *netdev)
+ }
+
+ rc = __ibmvnic_open(netdev);
+- netif_carrier_on(netdev);
+
+ return rc;
+ }
+@@ -1864,8 +1863,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
+
+- netif_carrier_on(netdev);
+-
+ return 0;
+ }
+
+@@ -1935,8 +1932,6 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
+ return 0;
+ }
+
+- netif_carrier_on(netdev);
+-
+ return 0;
+ }
+
+@@ -4480,6 +4475,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
+ crq->link_state_indication.phys_link_state;
+ adapter->logical_link_state =
+ crq->link_state_indication.logical_link_state;
++ if (adapter->phys_link_state && adapter->logical_link_state)
++ netif_carrier_on(netdev);
++ else
++ netif_carrier_off(netdev);
+ break;
+ case CHANGE_MAC_ADDR_RSP:
+ netdev_dbg(netdev, "Got MAC address change Response\n");
+--
+2.20.1
+
diff --git a/patches.drivers/phy-sun4i-usb-Make-sure-to-disable-PHY0-passby-for-p.patch b/patches.drivers/phy-sun4i-usb-Make-sure-to-disable-PHY0-passby-for-p.patch
new file mode 100644
index 0000000000..84e3a9aa93
--- /dev/null
+++ b/patches.drivers/phy-sun4i-usb-Make-sure-to-disable-PHY0-passby-for-p.patch
@@ -0,0 +1,50 @@
+From e6f32efb1b128344a2c7df9875bc1a1abaa1d395 Mon Sep 17 00:00:00 2001
+From: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+Date: Thu, 14 Mar 2019 14:05:18 +0100
+Subject: [PATCH] phy: sun4i-usb: Make sure to disable PHY0 passby for peripheral mode
+Git-commit: e6f32efb1b128344a2c7df9875bc1a1abaa1d395
+Patch-mainline: v5.2-rc1
+References: bsc#1051510
+
+On platforms where the MUSB and HCI controllers share PHY0, PHY passby
+is required when using the HCI controller with the PHY, but it must be
+disabled when the MUSB controller is used instead.
+
+Without this, PHY0 passby is always enabled, which results in broken
+peripheral mode on such platforms (e.g. H3/H5).
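+
+The gist of the fix (sketch; id_det == 0 means the OTG ID pin reads low,
+i.e. a host cable is attached, see the hunk below):
+
+	/* Enable PHY0 passby for host mode only. */
+	sun4i_usb_phy_passby(phy, !id_det);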
+
+Fixes: ba4bdc9e1dc0 ("PHY: sunxi: Add driver for sunxi usb phy")
+
+Signed-off-by: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/phy/allwinner/phy-sun4i-usb.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
+index 5163097b43df..7b23920e0068 100644
+--- a/drivers/phy/allwinner/phy-sun4i-usb.c
++++ b/drivers/phy/allwinner/phy-sun4i-usb.c
+@@ -551,6 +551,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
+ struct sun4i_usb_phy_data *data =
+ container_of(work, struct sun4i_usb_phy_data, detect.work);
+ struct phy *phy0 = data->phys[0].phy;
++ struct sun4i_usb_phy *phy = phy_get_drvdata(phy0);
+ bool force_session_end, id_notify = false, vbus_notify = false;
+ int id_det, vbus_det;
+
+@@ -607,6 +608,9 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
+ mutex_unlock(&phy0->mutex);
+ }
+
++ /* Enable PHY0 passby for host mode only. */
++ sun4i_usb_phy_passby(phy, !id_det);
++
+ /* Re-route PHY0 if necessary */
+ if (data->cfg->phy0_dual_route)
+ sun4i_usb_phy0_reroute(data, id_det);
+--
+2.16.4
+
diff --git a/patches.drivers/sc16is7xx-move-label-err_spi-to-correct-section.patch b/patches.drivers/sc16is7xx-move-label-err_spi-to-correct-section.patch
new file mode 100644
index 0000000000..da56092523
--- /dev/null
+++ b/patches.drivers/sc16is7xx-move-label-err_spi-to-correct-section.patch
@@ -0,0 +1,49 @@
+From e00164a0f000de893944981f41a568c981aca658 Mon Sep 17 00:00:00 2001
+From: Guoqing Jiang <gqjiang@suse.com>
+Date: Tue, 9 Apr 2019 16:16:38 +0800
+Subject: [PATCH] sc16is7xx: move label 'err_spi' to correct section
+Mime-version: 1.0
+Content-type: text/plain; charset=UTF-8
+Content-transfer-encoding: 8bit
+Git-commit: e00164a0f000de893944981f41a568c981aca658
+Patch-mainline: v5.1-rc6
+References: bsc#1051510
+
+err_spi is used when SERIAL_SC16IS7XX_SPI is enabled, so make
+the label only available under the SERIAL_SC16IS7XX_SPI option.
+Otherwise, the below warning appears.
+
+drivers/tty/serial/sc16is7xx.c:1523:1: warning: label ‘err_spi’ defined but not used [-Wunused-label]
+ err_spi:
+ ^~~~~~~
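+
+A minimal standalone reproduction of the warning pattern (illustrative
+only; register_spi() and CONFIG_SPI are hypothetical stand-ins):
+
+	int register_spi(void);	/* hypothetical helper */
+
+	int init(void)
+	{
+	#ifdef CONFIG_SPI
+		if (register_spi() < 0)
+			goto err_spi;
+	#endif
+		return 0;
+	err_spi:	/* -Wunused-label when CONFIG_SPI is not set */
+		return -1;
+	}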
+
+Signed-off-by: Guoqing Jiang <gqjiang@suse.com>
+Fixes: ac0cdb3d9901 ("sc16is7xx: missing unregister/delete driver on error in sc16is7xx_init()")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/tty/serial/sc16is7xx.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 09a183dfc526..22381a8c72e4 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1520,10 +1520,12 @@ static int __init sc16is7xx_init(void)
+ #endif
+ return ret;
+
++#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
+ err_spi:
+ #ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+ i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+ #endif
++#endif
+ err_i2c:
+ uart_unregister_driver(&sc16is7xx_uart);
+ return ret;
+--
+2.16.4
+
diff --git a/patches.drivers/sc16is7xx-put-err_spi-and-err_i2c-into-correct-ifdef.patch b/patches.drivers/sc16is7xx-put-err_spi-and-err_i2c-into-correct-ifdef.patch
new file mode 100644
index 0000000000..9106802a4e
--- /dev/null
+++ b/patches.drivers/sc16is7xx-put-err_spi-and-err_i2c-into-correct-ifdef.patch
@@ -0,0 +1,45 @@
+From c53051128bb0e8754e13345d782ca69e5e1ce36d Mon Sep 17 00:00:00 2001
+From: Guoqing Jiang <gqjiang@suse.com>
+Date: Thu, 18 Apr 2019 10:01:55 +0800
+Subject: [PATCH] sc16is7xx: put err_spi and err_i2c into correct #ifdef
+Git-commit: c53051128bb0e8754e13345d782ca69e5e1ce36d
+Patch-mainline: v5.1-rc6
+References: bsc#1051510
+
+err_spi is only used within SERIAL_SC16IS7XX_SPI, while
+err_i2c is only used within SERIAL_SC16IS7XX_I2C.
+So we need to put err_spi and err_i2c into the matching
+#ifdef blocks accordingly.
+
+This change fixes the earlier patch ("sc16is7xx: move label
+'err_spi' to correct section").
+
+Signed-off-by: Guoqing Jiang <gqjiang@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/tty/serial/sc16is7xx.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 22381a8c72e4..a31db15cd7c0 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1522,11 +1522,11 @@ static int __init sc16is7xx_init(void)
+
+ #ifdef CONFIG_SERIAL_SC16IS7XX_SPI
+ err_spi:
++#endif
+ #ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+ i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+-#endif
+-#endif
+ err_i2c:
++#endif
+ uart_unregister_driver(&sc16is7xx_uart);
+ return ret;
+ }
+--
+2.16.4
+
diff --git a/patches.drm/0004-drm-i915-gvt-Fix-incorrect-mask-of-mmio-0x22028-in-g.patch b/patches.drm/0004-drm-i915-gvt-Fix-incorrect-mask-of-mmio-0x22028-in-g.patch
new file mode 100644
index 0000000000..63913ed9a6
--- /dev/null
+++ b/patches.drm/0004-drm-i915-gvt-Fix-incorrect-mask-of-mmio-0x22028-in-g.patch
@@ -0,0 +1,39 @@
+From 2bfc4975083ace0e5777116514c3a75e59b3dbcd Mon Sep 17 00:00:00 2001
+From: Colin Xu <colin.xu@intel.com>
+Date: Mon, 1 Apr 2019 14:13:53 +0800
+Subject: drm/i915/gvt: Fix incorrect mask of mmio 0x22028 in gen8/9 mmio list
+Git-commit: 2bfc4975083ace0e5777116514c3a75e59b3dbcd
+Patch-mainline: v5.2-rc1
+References: bnc#1113722
+
+According to GFX PRM on 01.org, bit 31:16 of mmio 0x22028 should be masks.
+
+Fixes: 178657139307 ("drm/i915/gvt: vGPU context switch")
+Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Colin Xu <colin.xu@intel.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+---
+ drivers/gpu/drm/i915/gvt/mmio_context.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
++++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
+@@ -79,7 +79,7 @@ static struct engine_mmio gen8_engine_mm
+ {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
++ {BCS, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+ {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ };
+
+@@ -130,7 +130,7 @@ static struct engine_mmio gen9_engine_mm
+ {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
++ {BCS, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+
+ {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
+
diff --git a/patches.drm/0005-drm-meson-add-size-and-alignment-requirements-for-du.patch b/patches.drm/0005-drm-meson-add-size-and-alignment-requirements-for-du.patch
new file mode 100644
index 0000000000..65e89c9db0
--- /dev/null
+++ b/patches.drm/0005-drm-meson-add-size-and-alignment-requirements-for-du.patch
@@ -0,0 +1,53 @@
+From 852ce7285c99e3f7b56e76511e1b33c645a2b648 Mon Sep 17 00:00:00 2001
+From: Neil Armstrong <narmstrong@baylibre.com>
+Date: Mon, 8 Apr 2019 11:01:37 +0200
+Subject: drm/meson: add size and alignment requirements for dumb buffers
+Git-commit: 852ce7285c99e3f7b56e76511e1b33c645a2b648
+Patch-mainline: v5.2-rc1
+References: bnc#1113722
+
+On the Amlogic SoCs, the Canvas buffer stride must be aligned on
+64 bytes and the overall size should be aligned on the PAGE size.
+
+Add a custom dumb_create op to enforce these requirements.
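+
+As a worked example (hypothetical 1920x1080 XRGB8888 dumb buffer,
+4096-byte pages assumed):
+
+	pitch = ALIGN(DIV_ROUND_UP(1920 * 32, 8), 64)
+	      = ALIGN(7680, 64) = 7680 bytes
+	size  = PAGE_ALIGN(7680 * 1080)
+	      = PAGE_ALIGN(8294400) = 8294400 bytes (already page-aligned)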
+
+Fixes: bbbe775ec5b5 ("drm: Add support for Amlogic Meson Graphic Controller")
+Suggested-by: Sky Zhou <sky.zhou@amlogic.com>
+Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
+Reviewed-by: Sky Zhou <sky.zhou@amlogic.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190408090137.2402-1-narmstrong@baylibre.com
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+---
+ drivers/gpu/drm/meson/meson_drv.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -93,6 +93,18 @@ static irqreturn_t meson_irq(int irq, vo
+ return IRQ_HANDLED;
+ }
+
++static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
++ struct drm_mode_create_dumb *args)
++{
++ /*
++ * We need 64bytes aligned stride, and PAGE aligned size
++ */
++ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), SZ_64);
++ args->size = PAGE_ALIGN(args->pitch * args->height);
++
++ return drm_gem_cma_dumb_create_internal(file, dev, args);
++}
++
+ DEFINE_DRM_GEM_CMA_FOPS(fops);
+
+ static struct drm_driver meson_driver = {
+@@ -115,7 +127,7 @@ static struct drm_driver meson_driver =
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ /* GEM Ops */
+- .dumb_create = drm_gem_cma_dumb_create,
++ .dumb_create = meson_dumb_create,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
diff --git a/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch b/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch
index ef91535590..ca0ac6e584 100644
--- a/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch
+++ b/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch
@@ -102,17 +102,17 @@ Fixes: f64d5ca86821 ("btrfs: delayed_ref: Add new function to record reserved sp
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
---
- fs/btrfs/delayed-ref.c | 12 ++++--------
+ fs/btrfs/delayed-ref.c | 14 ++++----------
fs/btrfs/delayed-ref.h | 11 -----------
fs/btrfs/extent-tree.c | 3 ---
fs/btrfs/qgroup.c | 19 +++++++++++++++----
fs/btrfs/qgroup.h | 18 +++++++++++-------
include/trace/events/btrfs.h | 29 -----------------------------
- 6 files changed, 30 insertions(+), 62 deletions(-)
+ 6 files changed, 30 insertions(+), 64 deletions(-)
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
-@@ -601,16 +601,14 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -590,15 +590,13 @@ static void init_delayed_ref_head(struct
RB_CLEAR_NODE(&head_ref->href_node);
head_ref->processing = 0;
head_ref->total_ref_mod = count_mod;
@@ -121,7 +121,6 @@ Signed-off-by: David Sterba <dsterba@suse.com>
spin_lock_init(&head_ref->lock);
mutex_init(&head_ref->mutex);
- /* Record qgroup extent info if provided */
if (qrecord) {
if (ref_root && reserved) {
- head_ref->qgroup_ref_root = ref_root;
@@ -131,16 +130,18 @@ Signed-off-by: David Sterba <dsterba@suse.com>
}
qrecord->bytenr = bytenr;
-@@ -629,8 +627,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -641,10 +639,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
if (existing) {
-- WARN_ON(ref_root && reserved && existing->qgroup_ref_root
+- WARN_ON(qrecord && head_ref->qgroup_ref_root
+- && head_ref->qgroup_reserved
+- && existing->qgroup_ref_root
- && existing->qgroup_reserved);
update_existing_head_ref(delayed_refs, existing, head_ref,
old_ref_mod);
/*
-@@ -797,7 +793,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -764,7 +758,7 @@ int btrfs_add_delayed_tree_ref(struct bt
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
is_fstree(ref_root)) {
@@ -149,7 +150,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
if (!record)
goto free_head_ref;
}
-@@ -860,7 +856,7 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -848,7 +842,7 @@ int btrfs_add_delayed_data_ref(struct bt
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
is_fstree(ref_root)) {
@@ -160,7 +161,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
kmem_cache_free(btrfs_delayed_ref_head_cachep,
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
-@@ -116,17 +116,6 @@ struct btrfs_delayed_ref_head {
+@@ -115,17 +115,6 @@ struct btrfs_delayed_ref_head {
int ref_mod;
/*
@@ -180,7 +181,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
* until the delayed ref is processed. must_insert_reserved is
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
-@@ -2564,9 +2564,6 @@ static int cleanup_ref_head(struct btrfs
+@@ -2563,9 +2563,6 @@ static int cleanup_ref_head(struct btrfs
}
}
diff --git a/patches.fixes/0001-dt-bindings-net-Fix-a-typo-in-the-phy-mode-list-for-.patch b/patches.fixes/0001-dt-bindings-net-Fix-a-typo-in-the-phy-mode-list-for-.patch
new file mode 100644
index 0000000000..af7026daec
--- /dev/null
+++ b/patches.fixes/0001-dt-bindings-net-Fix-a-typo-in-the-phy-mode-list-for-.patch
@@ -0,0 +1,37 @@
+From 822dd046d7e22a8d01728200a003da230e4c6f7f Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Tue, 7 May 2019 17:35:55 +0200
+Subject: [PATCH] dt-bindings: net: Fix a typo in the phy-mode list for
+ ethernet bindings
+Git-commit: 822dd046d7e22a8d01728200a003da230e4c6f7f
+Patch-mainline: v5.1
+References: bsc#1129770
+
+The phy_mode "2000base-x" is actually supposed to be "1000base-x", even
+though the commit title of the original patch says otherwise.
+
+Fixes: 55601a880690 ("net: phy: Add 2000base-x, 2500base-x and rxaui modes")
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Reviewed-by: Fabian Baumanis <fabian.baumanis@suse.com>
+---
+ Documentation/devicetree/bindings/net/ethernet.txt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
+index a68621580584..d45b5b56fa39 100644
+--- a/Documentation/devicetree/bindings/net/ethernet.txt
++++ b/Documentation/devicetree/bindings/net/ethernet.txt
+@@ -36,7 +36,7 @@ Documentation/devicetree/bindings/phy/phy-bindings.txt.
+ * "smii"
+ * "xgmii"
+ * "trgmii"
+- * "2000base-x",
++ * "1000base-x",
+ * "2500base-x",
+ * "rxaui"
+ * "xaui"
+--
+2.16.4
+
diff --git a/patches.fixes/net-rds-force-to-destroy-connection-if-t_sock-is-NUL.patch b/patches.fixes/net-rds-force-to-destroy-connection-if-t_sock-is-NUL.patch
new file mode 100644
index 0000000000..bb5d3efa51
--- /dev/null
+++ b/patches.fixes/net-rds-force-to-destroy-connection-if-t_sock-is-NUL.patch
@@ -0,0 +1,143 @@
+From: Mao Wenan <maowenan@huawei.com>
+Date: Thu, 28 Mar 2019 17:10:56 +0800
+Subject: net: rds: force to destroy connection if t_sock is NULL in rds_tcp_kill_sock().
+Patch-mainline: v5.1-rc4
+Git-commit: cb66ddd156203daefb8d71158036b27b0e2caf63
+References: CVE-2019-11815 bsc#1134537
+
+When a net namespace is cleaned up, rds_tcp_exit_net() will call
+rds_tcp_kill_sock(). If t_sock is NULL, it will not call
+rds_conn_destroy(), rds_conn_path_destroy() and rds_tcp_conn_free() to free
+the connection, and the worker cp_conn_w is not stopped; afterwards the net
+is freed in net_drop_ns(), while the worker cp_conn_w, rds_connect_worker(),
+will call rds_tcp_conn_path_connect() and reference 'net', which has
+already been freed.
+
+In rds_tcp_conn_path_connect(), rds_tcp_set_callbacks() will set t_sock =
+sock before sock->ops->connect, but if connect() fails, it will call
+rds_tcp_restore_callbacks() and set t_sock = NULL. If connect() always
+fails, rds_connect_worker() will try to reconnect all the time, so
+rds_tcp_kill_sock() will never cancel the worker cp_conn_w and free the
+connections.
+
+Therefore, the condition !tc->t_sock is not needed in the
+cleanup_net->rds_tcp_exit_net->rds_tcp_kill_sock path, because tc->t_sock
+is always NULL there and there is no other path to cancel cp_conn_w and
+free the connection. So this patch removes the check:
+
+rds_tcp_kill_sock():
+...
+if (net != c_net || !tc->t_sock)
+...
+Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+
+==================================================================
+BUG: KASAN: use-after-free in inet_create+0xbcc/0xd28
+net/ipv4/af_inet.c:340
+Read of size 4 at addr ffff8003496a4684 by task kworker/u8:4/3721
+
+CPU: 3 PID: 3721 Comm: kworker/u8:4 Not tainted 5.1.0 #11
+Hardware name: linux,dummy-virt (DT)
+Workqueue: krdsd rds_connect_worker
+Call trace:
+ dump_backtrace+0x0/0x3c0 arch/arm64/kernel/time.c:53
+ show_stack+0x28/0x38 arch/arm64/kernel/traps.c:152
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x120/0x188 lib/dump_stack.c:113
+ print_address_description+0x68/0x278 mm/kasan/report.c:253
+ kasan_report_error mm/kasan/report.c:351 [inline]
+ kasan_report+0x21c/0x348 mm/kasan/report.c:409
+ __asan_report_load4_noabort+0x30/0x40 mm/kasan/report.c:429
+ inet_create+0xbcc/0xd28 net/ipv4/af_inet.c:340
+ __sock_create+0x4f8/0x770 net/socket.c:1276
+ sock_create_kern+0x50/0x68 net/socket.c:1322
+ rds_tcp_conn_path_connect+0x2b4/0x690 net/rds/tcp_connect.c:114
+ rds_connect_worker+0x108/0x1d0 net/rds/threads.c:175
+ process_one_work+0x6e8/0x1700 kernel/workqueue.c:2153
+ worker_thread+0x3b0/0xdd0 kernel/workqueue.c:2296
+ kthread+0x2f0/0x378 kernel/kthread.c:255
+ ret_from_fork+0x10/0x18 arch/arm64/kernel/entry.S:1117
+
+Allocated by task 687:
+ save_stack mm/kasan/kasan.c:448 [inline]
+ set_track mm/kasan/kasan.c:460 [inline]
+ kasan_kmalloc+0xd4/0x180 mm/kasan/kasan.c:553
+ kasan_slab_alloc+0x14/0x20 mm/kasan/kasan.c:490
+ slab_post_alloc_hook mm/slab.h:444 [inline]
+ slab_alloc_node mm/slub.c:2705 [inline]
+ slab_alloc mm/slub.c:2713 [inline]
+ kmem_cache_alloc+0x14c/0x388 mm/slub.c:2718
+ kmem_cache_zalloc include/linux/slab.h:697 [inline]
+ net_alloc net/core/net_namespace.c:384 [inline]
+ copy_net_ns+0xc4/0x2d0 net/core/net_namespace.c:424
+ create_new_namespaces+0x300/0x658 kernel/nsproxy.c:107
+ unshare_nsproxy_namespaces+0xa0/0x198 kernel/nsproxy.c:206
+ ksys_unshare+0x340/0x628 kernel/fork.c:2577
+ __do_sys_unshare kernel/fork.c:2645 [inline]
+ __se_sys_unshare kernel/fork.c:2643 [inline]
+ __arm64_sys_unshare+0x38/0x58 kernel/fork.c:2643
+ __invoke_syscall arch/arm64/kernel/syscall.c:35 [inline]
+ invoke_syscall arch/arm64/kernel/syscall.c:47 [inline]
+ el0_svc_common+0x168/0x390 arch/arm64/kernel/syscall.c:83
+ el0_svc_handler+0x60/0xd0 arch/arm64/kernel/syscall.c:129
+ el0_svc+0x8/0xc arch/arm64/kernel/entry.S:960
+
+Freed by task 264:
+ save_stack mm/kasan/kasan.c:448 [inline]
+ set_track mm/kasan/kasan.c:460 [inline]
+ __kasan_slab_free+0x114/0x220 mm/kasan/kasan.c:521
+ kasan_slab_free+0x10/0x18 mm/kasan/kasan.c:528
+ slab_free_hook mm/slub.c:1370 [inline]
+ slab_free_freelist_hook mm/slub.c:1397 [inline]
+ slab_free mm/slub.c:2952 [inline]
+ kmem_cache_free+0xb8/0x3a8 mm/slub.c:2968
+ net_free net/core/net_namespace.c:400 [inline]
+ net_drop_ns.part.6+0x78/0x90 net/core/net_namespace.c:407
+ net_drop_ns net/core/net_namespace.c:406 [inline]
+ cleanup_net+0x53c/0x6d8 net/core/net_namespace.c:569
+ process_one_work+0x6e8/0x1700 kernel/workqueue.c:2153
+ worker_thread+0x3b0/0xdd0 kernel/workqueue.c:2296
+ kthread+0x2f0/0x378 kernel/kthread.c:255
+ ret_from_fork+0x10/0x18 arch/arm64/kernel/entry.S:1117
+
+The buggy address belongs to the object at ffff8003496a3f80
+ which belongs to the cache net_namespace of size 7872
+The buggy address is located 1796 bytes inside of
+ 7872-byte region [ffff8003496a3f80, ffff8003496a5e40)
+The buggy address belongs to the page:
+page:ffff7e000d25a800 count:1 mapcount:0 mapping:ffff80036ce4b000
+index:0x0 compound_mapcount: 0
+flags: 0xffffe0000008100(slab|head)
+raw: 0ffffe0000008100 dead000000000100 dead000000000200 ffff80036ce4b000
+raw: 0000000000000000 0000000080040004 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff8003496a4580: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8003496a4600: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+>ffff8003496a4680: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ^
+ ffff8003496a4700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ffff8003496a4780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+==================================================================
+
+Fixes: 467fa15356ac("RDS-TCP: Support multiple RDS-TCP listen endpoints, one per netns.")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Mao Wenan <maowenan@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Kubecek <mkubecek@suse.cz>
+
+---
+ net/rds/tcp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -509,7 +509,7 @@ static void rds_tcp_kill_sock(struct net *net)
+ list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+ struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+
+- if (net != c_net || !tc->t_sock)
++ if (net != c_net)
+ continue;
+ if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
+ list_move_tail(&tc->t_tcp_node, &tmp_list);
diff --git a/patches.fixes/nvme-multipath-avoid-crash-on-invalid-subsystem-cntl.patch b/patches.fixes/nvme-multipath-avoid-crash-on-invalid-subsystem-cntl.patch
index 84190b3834..f066dbf5a6 100644
--- a/patches.fixes/nvme-multipath-avoid-crash-on-invalid-subsystem-cntl.patch
+++ b/patches.fixes/nvme-multipath-avoid-crash-on-invalid-subsystem-cntl.patch
@@ -1,8 +1,10 @@
+From 8a03b27ea61c2ab9de16a8a195822ef05e799748 Mon Sep 17 00:00:00 2001
From: Hannes Reinecke <hare@suse.de>
-Date: Fri May 3 15:37:35 2019 +0200
+Date: Fri, 3 May 2019 15:37:35 +0200
Subject: [PATCH] nvme-multipath: avoid crash on invalid subsystem cntlid
enumeration
-Git-commit: e2296434d80e1717695f0dff9f04fb9b7ee0fed0
+
+Git-commit: 8a03b27ea61c2ab9de16a8a195822ef05e799748
Patch-Mainline: queued in subsystem maintainer repository
Git-repo: git://git.infradead.org/nvme.git
References: bsc#1129273
@@ -20,7 +22,7 @@ Signed-off-by: Christoph Hellwig <hch@lst.de>
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
-index f0716f6ce41f..2551264ef2b5 100644
+index 5c9429d41120..499acf07d61a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -31,7 +31,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
@@ -33,5 +35,5 @@ index f0716f6ce41f..2551264ef2b5 100644
} else {
sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
--
-2.16.4
+2.20.1
diff --git a/patches.fixes/vfio-mdev-Avoid-release-parent-reference-during-erro.patch b/patches.fixes/vfio-mdev-Avoid-release-parent-reference-during-erro.patch
new file mode 100644
index 0000000000..f66cb88c0a
--- /dev/null
+++ b/patches.fixes/vfio-mdev-Avoid-release-parent-reference-during-erro.patch
@@ -0,0 +1,40 @@
+From 60e7f2c3fe9919cee9534b422865eed49f4efb15 Mon Sep 17 00:00:00 2001
+From: Parav Pandit <parav@mellanox.com>
+Date: Tue, 30 Apr 2019 17:49:28 -0500
+Subject: [PATCH] vfio/mdev: Avoid release parent reference during error path
+Git-commit: 60e7f2c3fe9919cee9534b422865eed49f4efb15
+Patch-mainline: v5.2-rc1
+References: bsc#1051510
+
+During mdev parent registration in mdev_register_device(),
+if the parent device is a duplicate, the error path releases the
+reference of the existing parent device.
+This is incorrect; the existing parent device should not be touched.
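+
+A sketch of the intended behavior (hedged; the error label is assumed to
+look roughly like this):
+
+	/* Check for duplicate */
+	parent = __find_parent_device(dev);
+	if (parent) {
+		parent = NULL;	/* duplicate: keep the existing parent's ref */
+		ret = -EEXIST;
+		goto add_dev_err;
+	}
+	...
+add_dev_err:
+	mutex_unlock(&parent_list_lock);
+	if (parent)
+		mdev_put_parent(parent);	/* skipped when parent == NULL */
+	return ret;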
+
+Fixes: 7b96953bc640 ("vfio: Mediated device Core driver")
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com>
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Parav Pandit <parav@mellanox.com>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/vfio/mdev/mdev_core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
+index b96fedc77ee5..1299d2e72ce2 100644
+--- a/drivers/vfio/mdev/mdev_core.c
++++ b/drivers/vfio/mdev/mdev_core.c
+@@ -181,6 +181,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
+ /* Check for duplicate */
+ parent = __find_parent_device(dev);
+ if (parent) {
++ parent = NULL;
+ ret = -EEXIST;
+ goto add_dev_err;
+ }
+--
+2.16.4
+
diff --git a/patches.fixes/vfio-mdev-Fix-aborting-mdev-child-device-removal-if-.patch b/patches.fixes/vfio-mdev-Fix-aborting-mdev-child-device-removal-if-.patch
new file mode 100644
index 0000000000..10f8724bda
--- /dev/null
+++ b/patches.fixes/vfio-mdev-Fix-aborting-mdev-child-device-removal-if-.patch
@@ -0,0 +1,77 @@
+From 6093e348a5e2475c5bb2e571346460f939998670 Mon Sep 17 00:00:00 2001
+From: Parav Pandit <parav@mellanox.com>
+Date: Tue, 30 Apr 2019 17:49:33 -0500
+Subject: [PATCH] vfio/mdev: Fix aborting mdev child device removal if one fails
+Git-commit: 6093e348a5e2475c5bb2e571346460f939998670
+Patch-mainline: v5.2-rc1
+References: bsc#1051510
+
+device_for_each_child() stops executing the callback function for the
+remaining child devices if the callback hits an error.
+Each child mdev device is independent of the others.
+While unregistering the parent device, the mdev core must remove all
+child mdev devices.
+Therefore, mdev_device_remove_cb() always returns success so that
+device_for_each_child() doesn't abort if one child removal hits an error.
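+
+For reference, device_for_each_child() has roughly the following
+semantics (simplified sketch, not the actual driver-core code):
+
+	int device_for_each_child(struct device *parent, void *data,
+				  int (*fn)(struct device *dev, void *data))
+	{
+		struct klist_iter i;
+		struct device *child;
+		int error = 0;
+
+		klist_iter_init(&parent->p->klist_children, &i);
+		while ((child = next_device(&i)) && !error)
+			error = fn(child, data);	/* stops on first error */
+		klist_iter_exit(&i);
+		return error;
+	}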
+
+While at it, simplify the remove and unregister functions as described
+below.
+
+There is no need to pass a forced-flag pointer during mdev parent
+removal, which invokes mdev_device_remove(). So simplify the flow.
+
+mdev_device_remove() is called from two paths.
+1. mdev_unregister_driver()
+ mdev_device_remove_cb()
+ mdev_device_remove()
+2. remove_store()
+ mdev_device_remove()
+
+Fixes: 7b96953bc640 ("vfio: Mediated device Core driver")
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Parav Pandit <parav@mellanox.com>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/vfio/mdev/mdev_core.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
+index 836d31985f14..1a317e409355 100644
+--- a/drivers/vfio/mdev/mdev_core.c
++++ b/drivers/vfio/mdev/mdev_core.c
+@@ -149,10 +149,10 @@ static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
+
+ static int mdev_device_remove_cb(struct device *dev, void *data)
+ {
+- if (!dev_is_mdev(dev))
+- return 0;
++ if (dev_is_mdev(dev))
++ mdev_device_remove(dev, true);
+
+- return mdev_device_remove(dev, data ? *(bool *)data : true);
++ return 0;
+ }
+
+ /*
+@@ -240,7 +240,6 @@ EXPORT_SYMBOL(mdev_register_device);
+ void mdev_unregister_device(struct device *dev)
+ {
+ struct mdev_parent *parent;
+- bool force_remove = true;
+
+ mutex_lock(&parent_list_lock);
+ parent = __find_parent_device(dev);
+@@ -254,8 +253,7 @@ void mdev_unregister_device(struct device *dev)
+ list_del(&parent->next);
+ class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
+
+- device_for_each_child(dev, (void *)&force_remove,
+- mdev_device_remove_cb);
++ device_for_each_child(dev, NULL, mdev_device_remove_cb);
+
+ parent_remove_sysfs_files(parent);
+
+--
+2.16.4
+
diff --git a/patches.fixes/vfio-pci-use-correct-format-characters.patch b/patches.fixes/vfio-pci-use-correct-format-characters.patch
new file mode 100644
index 0000000000..5cb3f7ce88
--- /dev/null
+++ b/patches.fixes/vfio-pci-use-correct-format-characters.patch
@@ -0,0 +1,83 @@
+From 426b046b748d1f47e096e05bdcc6fb4172791307 Mon Sep 17 00:00:00 2001
+From: Louis Taylor <louis@kragniz.eu>
+Date: Wed, 3 Apr 2019 12:36:20 -0600
+Subject: [PATCH] vfio/pci: use correct format characters
+Git-commit: 426b046b748d1f47e096e05bdcc6fb4172791307
+Patch-mainline: v5.1-rc4
+References: bsc#1051510
+
+When compiling with -Wformat, clang emits the following warnings:
+
+drivers/vfio/pci/vfio_pci.c:1601:5: warning: format specifies type
+ 'unsigned short' but the argument has type 'unsigned int' [-Wformat]
+ vendor, device, subvendor, subdevice,
+ ^~~~~~
+
+drivers/vfio/pci/vfio_pci.c:1601:13: warning: format specifies type
+ 'unsigned short' but the argument has type 'unsigned int' [-Wformat]
+ vendor, device, subvendor, subdevice,
+ ^~~~~~
+
+drivers/vfio/pci/vfio_pci.c:1601:21: warning: format specifies type
+ 'unsigned short' but the argument has type 'unsigned int' [-Wformat]
+ vendor, device, subvendor, subdevice,
+ ^~~~~~~~~
+
+drivers/vfio/pci/vfio_pci.c:1601:32: warning: format specifies type
+ 'unsigned short' but the argument has type 'unsigned int' [-Wformat]
+ vendor, device, subvendor, subdevice,
+ ^~~~~~~~~
+
+drivers/vfio/pci/vfio_pci.c:1605:5: warning: format specifies type
+ 'unsigned short' but the argument has type 'unsigned int' [-Wformat]
+ vendor, device, subvendor, subdevice,
+ ^~~~~~
+
+drivers/vfio/pci/vfio_pci.c:1605:13: warning: format specifies type
+ 'unsigned short' but the argument has type 'unsigned int' [-Wformat]
+ vendor, device, subvendor, subdevice,
+ ^~~~~~
+
+drivers/vfio/pci/vfio_pci.c:1605:21: warning: format specifies type
+ 'unsigned short' but the argument has type 'unsigned int' [-Wformat]
+ vendor, device, subvendor, subdevice,
+ ^~~~~~~~~
+
+drivers/vfio/pci/vfio_pci.c:1605:32: warning: format specifies type
+ 'unsigned short' but the argument has type 'unsigned int' [-Wformat]
+ vendor, device, subvendor, subdevice,
+ ^~~~~~~~~
+
+The types of these arguments are unconditionally defined, so this patch
+updates the format characters to the correct ones for unsigned ints.
+
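+A minimal illustration of the mismatch (hypothetical values, not the
+driver code): with unsigned int arguments, "%x" is the matching
+conversion, while "%hx" tells printk to expect an unsigned short.
+
+	unsigned int vendor = 0x8086, device = 0x10d3;
+
+	pr_info("bad:  [%04hx:%04hx]\n", vendor, device); /* -Wformat warns */
+	pr_info("good: [%04x:%04x]\n", vendor, device);
+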
+Link: https://github.com/ClangBuiltLinux/linux/issues/378
+Signed-off-by: Louis Taylor <louis@kragniz.eu>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/vfio/pci/vfio_pci.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index a25659b5a5d1..3fa20e95a6bb 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void)
+ rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
+ subvendor, subdevice, class, class_mask, 0);
+ if (rc)
+- pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
++ pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
+ vendor, device, subvendor, subdevice,
+ class, class_mask, rc);
+ else
+- pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
++ pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
+ vendor, device, subvendor, subdevice,
+ class, class_mask);
+ }
+--
+2.16.4
+
diff --git a/patches.fixes/vhost-vsock-fix-reset-orphans-race-with-close-timeou.patch b/patches.fixes/vhost-vsock-fix-reset-orphans-race-with-close-timeou.patch
new file mode 100644
index 0000000000..d2044723dd
--- /dev/null
+++ b/patches.fixes/vhost-vsock-fix-reset-orphans-race-with-close-timeou.patch
@@ -0,0 +1,65 @@
+From c38f57da428b033f2721b611d84b1f40bde674a8 Mon Sep 17 00:00:00 2001
+From: Stefan Hajnoczi <stefanha@redhat.com>
+Date: Thu, 6 Dec 2018 19:14:34 +0000
+Subject: [PATCH] vhost/vsock: fix reset orphans race with close timeout
+Git-commit: c38f57da428b033f2721b611d84b1f40bde674a8
+Patch-mainline: v4.20-rc6
+References: bsc#1051510
+
+If a local process has closed a connected socket and hasn't received a
+RST packet yet, then the socket remains in the table until a timeout
+expires.
+
+When a vhost_vsock instance is released with the timeout still pending,
+the socket is never freed because vhost_vsock has already set the
+SOCK_DONE flag.
+
+Check if the close timer is pending and let it close the socket. This
+prevents the race which can leak sockets.
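+
+A condensed sketch of the resulting checks (field names as used in this
+patch):
+
+	/* peer still present: nothing to reset */
+	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+		return;
+
+	/* close timeout pending: let its callback clean up the socket */
+	if (vsk->close_work_scheduled)
+		return;
+
+	/* otherwise reset the orphaned connection */
+	sock_set_flag(sk, SOCK_DONE);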
+
+Reported-by: Maximilian Riemensberger <riemensberger@cadami.net>
+Cc: Graham Whaley <graham.whaley@gmail.com>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/vhost/vsock.c | 22 +++++++++++++++-------
+ 1 file changed, 15 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 34bc3ab40c6d..731e2ea2aeca 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -563,13 +563,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
+ * executing.
+ */
+
+- if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
+- sock_set_flag(sk, SOCK_DONE);
+- vsk->peer_shutdown = SHUTDOWN_MASK;
+- sk->sk_state = SS_UNCONNECTED;
+- sk->sk_err = ECONNRESET;
+- sk->sk_error_report(sk);
+- }
++ /* If the peer is still valid, no need to reset connection */
++ if (vhost_vsock_get(vsk->remote_addr.svm_cid))
++ return;
++
++ /* If the close timeout is pending, let it expire. This avoids races
++ * with the timeout callback.
++ */
++ if (vsk->close_work_scheduled)
++ return;
++
++ sock_set_flag(sk, SOCK_DONE);
++ vsk->peer_shutdown = SHUTDOWN_MASK;
++ sk->sk_state = SS_UNCONNECTED;
++ sk->sk_err = ECONNRESET;
++ sk->sk_error_report(sk);
+ }
+
+ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+--
+2.16.4
+
diff --git a/patches.fixes/vsock-virtio-fix-kernel-panic-after-device-hot-unplu.patch b/patches.fixes/vsock-virtio-fix-kernel-panic-after-device-hot-unplu.patch
new file mode 100644
index 0000000000..b6fe226c01
--- /dev/null
+++ b/patches.fixes/vsock-virtio-fix-kernel-panic-after-device-hot-unplu.patch
@@ -0,0 +1,103 @@
+From 22b5c0b63f32568e130fa2df4ba23efce3eb495b Mon Sep 17 00:00:00 2001
+From: Stefano Garzarella <sgarzare@redhat.com>
+Date: Fri, 1 Feb 2019 12:42:06 +0100
+Subject: [PATCH] vsock/virtio: fix kernel panic after device hot-unplug
+Git-commit: 22b5c0b63f32568e130fa2df4ba23efce3eb495b
+Patch-mainline: v5.0-rc6
+References: bsc#1051510
+
+virtio_vsock_remove() invokes vsock_core_exit() even if there
+are open sockets for the AF_VSOCK protocol family. In this way
+the vsock "transport" pointer is set to NULL, triggering a
+kernel panic at the first socket activity.
+
+This patch moves vsock_core_init()/vsock_core_exit() in virtio_vsock
+into the module_init and module_exit functions respectively, which
+cannot be invoked while there are open sockets.
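+
+A condensed sketch of the resulting module_init ordering (names from
+this patch; error paths unwind in reverse):
+
+	ret = register_virtio_driver(&virtio_vsock_driver);
+	if (ret)
+		goto out_wq;	/* destroy workqueue */
+
+	ret = vsock_core_init(&virtio_transport.transport);
+	if (ret)
+		goto out_vdr;	/* unregister driver, then workqueue */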
+
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1609699
+Reported-by: Yan Fu <yafu@redhat.com>
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ net/vmw_vsock/virtio_transport.c | 26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index 5d3cce9e8744..9dae54698737 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
+ {
+ struct virtio_vsock *vsock = virtio_vsock_get();
+
++ if (!vsock)
++ return VMADDR_CID_ANY;
++
+ return vsock->guest_cid;
+ }
+
+@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+
+ virtio_vsock_update_guest_cid(vsock);
+
+- ret = vsock_core_init(&virtio_transport.transport);
+- if (ret < 0)
+- goto out_vqs;
+-
+ vsock->rx_buf_nr = 0;
+ vsock->rx_buf_max_nr = 0;
+ atomic_set(&vsock->queued_replies, 0);
+@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
+ mutex_unlock(&the_virtio_vsock_mutex);
+ return 0;
+
+-out_vqs:
+- vsock->vdev->config->del_vqs(vsock->vdev);
+ out:
+ kfree(vsock);
+ mutex_unlock(&the_virtio_vsock_mutex);
+@@ -669,7 +666,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
+
+ mutex_lock(&the_virtio_vsock_mutex);
+ the_virtio_vsock = NULL;
+- vsock_core_exit();
+ mutex_unlock(&the_virtio_vsock_mutex);
+
+ vdev->config->del_vqs(vdev);
+@@ -702,14 +698,28 @@ static int __init virtio_vsock_init(void)
+ virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
+ if (!virtio_vsock_workqueue)
+ return -ENOMEM;
++
+ ret = register_virtio_driver(&virtio_vsock_driver);
+ if (ret)
+- destroy_workqueue(virtio_vsock_workqueue);
++ goto out_wq;
++
++ ret = vsock_core_init(&virtio_transport.transport);
++ if (ret)
++ goto out_vdr;
++
++ return 0;
++
++out_vdr:
++ unregister_virtio_driver(&virtio_vsock_driver);
++out_wq:
++ destroy_workqueue(virtio_vsock_workqueue);
+ return ret;
++
+ }
+
+ static void __exit virtio_vsock_exit(void)
+ {
++ vsock_core_exit();
+ unregister_virtio_driver(&virtio_vsock_driver);
+ destroy_workqueue(virtio_vsock_workqueue);
+ }
+--
+2.16.4
+
diff --git a/patches.fixes/vsock-virtio-fix-kernel-panic-from-virtio_transport_.patch b/patches.fixes/vsock-virtio-fix-kernel-panic-from-virtio_transport_.patch
new file mode 100644
index 0000000000..c7fa6c3fb5
--- /dev/null
+++ b/patches.fixes/vsock-virtio-fix-kernel-panic-from-virtio_transport_.patch
@@ -0,0 +1,109 @@
+From 4c404ce23358d5d8fbdeb7a6021a9b33d3c3c167 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Adalbert=20Laz=C4=83r?= <alazar@bitdefender.com>
+Date: Wed, 6 Mar 2019 12:13:53 +0200
+Subject: [PATCH] vsock/virtio: fix kernel panic from virtio_transport_reset_no_sock
+Mime-version: 1.0
+Content-type: text/plain; charset=UTF-8
+Content-transfer-encoding: 8bit
+Git-commit: 4c404ce23358d5d8fbdeb7a6021a9b33d3c3c167
+Patch-mainline: v5.1-rc1
+References: bsc#1051510
+
+Prior to commit 22b5c0b63f32 ("vsock/virtio: fix kernel panic
+after device hot-unplug"), vsock_core_init() was called from
+virtio_vsock_probe(). Now, virtio_transport_reset_no_sock() can be called
+before vsock_core_init() has had the chance to run.
+
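+A sketch of the defensive pattern the fix applies (names from this
+patch): look up the transport ops once and bail out when the transport
+is not registered yet.
+
+	t = virtio_transport_get_ops();
+	if (!t) {
+		virtio_transport_free_pkt(reply);
+		return -ENOTCONN;
+	}
+	return t->send_pkt(reply);
+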
+[Wed Feb 27 14:17:09 2019] BUG: unable to handle kernel NULL pointer dereference at 0000000000000110
+[Wed Feb 27 14:17:09 2019] #PF error: [normal kernel read fault]
+[Wed Feb 27 14:17:09 2019] PGD 0 P4D 0
+[Wed Feb 27 14:17:09 2019] Oops: 0000 [#1] SMP PTI
+[Wed Feb 27 14:17:09 2019] CPU: 3 PID: 59 Comm: kworker/3:1 Not tainted 5.0.0-rc7-390-generic-hvi #390
+[Wed Feb 27 14:17:09 2019] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014
+[Wed Feb 27 14:17:09 2019] Workqueue: virtio_vsock virtio_transport_rx_work [vmw_vsock_virtio_transport]
+[Wed Feb 27 14:17:09 2019] RIP: 0010:virtio_transport_reset_no_sock+0x8c/0xc0 [vmw_vsock_virtio_transport_common]
+[Wed Feb 27 14:17:09 2019] Code: 35 8b 4f 14 48 8b 57 08 31 f6 44 8b 4f 10 44 8b 07 48 8d 7d c8 e8 84 f8 ff ff 48 85 c0 48 89 c3 74 2a e8 f7 31 03 00 48 89 df <48> 8b 80 10 01 00 00 e8 68 fb 69 ed 48 8b 75 f0 65 48 33 34 25 28
+[Wed Feb 27 14:17:09 2019] RSP: 0018:ffffb42701ab7d40 EFLAGS: 00010282
+[Wed Feb 27 14:17:09 2019] RAX: 0000000000000000 RBX: ffff9d79637ee080 RCX: 0000000000000003
+[Wed Feb 27 14:17:09 2019] RDX: 0000000000000001 RSI: 0000000000000002 RDI: ffff9d79637ee080
+[Wed Feb 27 14:17:09 2019] RBP: ffffb42701ab7d78 R08: ffff9d796fae70e0 R09: ffff9d796f403500
+[Wed Feb 27 14:17:09 2019] R10: ffffb42701ab7d90 R11: 0000000000000000 R12: ffff9d7969d09240
+[Wed Feb 27 14:17:09 2019] R13: ffff9d79624e6840 R14: ffff9d7969d09318 R15: ffff9d796d48ff80
+[Wed Feb 27 14:17:09 2019] FS: 0000000000000000(0000) GS:ffff9d796fac0000(0000) knlGS:0000000000000000
+[Wed Feb 27 14:17:09 2019] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[Wed Feb 27 14:17:09 2019] CR2: 0000000000000110 CR3: 0000000427f22000 CR4: 00000000000006e0
+[Wed Feb 27 14:17:09 2019] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[Wed Feb 27 14:17:09 2019] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[Wed Feb 27 14:17:09 2019] Call Trace:
+[Wed Feb 27 14:17:09 2019] virtio_transport_recv_pkt+0x63/0x820 [vmw_vsock_virtio_transport_common]
+[Wed Feb 27 14:17:09 2019] ? kfree+0x17e/0x190
+[Wed Feb 27 14:17:09 2019] ? detach_buf_split+0x145/0x160
+[Wed Feb 27 14:17:09 2019] ? __switch_to_asm+0x40/0x70
+[Wed Feb 27 14:17:09 2019] virtio_transport_rx_work+0xa0/0x106 [vmw_vsock_virtio_transport]
+[Wed Feb 27 14:17:09 2019] NET: Registered protocol family 40
+[Wed Feb 27 14:17:09 2019] process_one_work+0x167/0x410
+[Wed Feb 27 14:17:09 2019] worker_thread+0x4d/0x460
+[Wed Feb 27 14:17:09 2019] kthread+0x105/0x140
+[Wed Feb 27 14:17:09 2019] ? rescuer_thread+0x360/0x360
+[Wed Feb 27 14:17:09 2019] ? kthread_destroy_worker+0x50/0x50
+[Wed Feb 27 14:17:09 2019] ret_from_fork+0x35/0x40
+[Wed Feb 27 14:17:09 2019] Modules linked in: vmw_vsock_virtio_transport vmw_vsock_virtio_transport_common input_leds vsock serio_raw i2c_piix4 mac_hid qemu_fw_cfg autofs4 cirrus ttm drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops virtio_net psmouse drm net_failover pata_acpi virtio_blk failover floppy
+
+Fixes: 22b5c0b63f32 ("vsock/virtio: fix kernel panic after device hot-unplug")
+Reported-by: Alexandru Herghelegiu <aherghelegiu@bitdefender.com>
+Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
+Co-developed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ net/vmw_vsock/virtio_transport_common.c | 22 +++++++++++++++-------
+ 1 file changed, 15 insertions(+), 7 deletions(-)
+
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 3ae3a33da70b..602715fc9a75 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
+ */
+ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+ {
++ const struct virtio_transport *t;
++ struct virtio_vsock_pkt *reply;
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RST,
+ .type = le16_to_cpu(pkt->hdr.type),
+@@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+ if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ return 0;
+
+- pkt = virtio_transport_alloc_pkt(&info, 0,
+- le64_to_cpu(pkt->hdr.dst_cid),
+- le32_to_cpu(pkt->hdr.dst_port),
+- le64_to_cpu(pkt->hdr.src_cid),
+- le32_to_cpu(pkt->hdr.src_port));
+- if (!pkt)
++ reply = virtio_transport_alloc_pkt(&info, 0,
++ le64_to_cpu(pkt->hdr.dst_cid),
++ le32_to_cpu(pkt->hdr.dst_port),
++ le64_to_cpu(pkt->hdr.src_cid),
++ le32_to_cpu(pkt->hdr.src_port));
++ if (!reply)
+ return -ENOMEM;
+
+- return virtio_transport_get_ops()->send_pkt(pkt);
++ t = virtio_transport_get_ops();
++ if (!t) {
++ virtio_transport_free_pkt(reply);
++ return -ENOTCONN;
++ }
++
++ return t->send_pkt(reply);
+ }
+
+ static void virtio_transport_wait_close(struct sock *sk, long timeout)
+--
+2.16.4
+
diff --git a/patches.fixes/vsock-virtio-reset-connected-sockets-on-device-remov.patch b/patches.fixes/vsock-virtio-reset-connected-sockets-on-device-remov.patch
new file mode 100644
index 0000000000..13a23d699f
--- /dev/null
+++ b/patches.fixes/vsock-virtio-reset-connected-sockets-on-device-remov.patch
@@ -0,0 +1,37 @@
+From 85965487abc540368393a15491e6e7fcd230039d Mon Sep 17 00:00:00 2001
+From: Stefano Garzarella <sgarzare@redhat.com>
+Date: Fri, 1 Feb 2019 12:42:07 +0100
+Subject: [PATCH] vsock/virtio: reset connected sockets on device removal
+Git-commit: 85965487abc540368393a15491e6e7fcd230039d
+Patch-mainline: v5.0-rc6
+References: bsc#1051510
+
+When the virtio transport device disappears, we should reset all
+connected sockets in order to inform the users.
+
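+The whole fix is one hook in the removal path (sketch; callback name
+from this patch):
+
+	/* on device removal, reset every connected AF_VSOCK socket */
+	vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+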
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ net/vmw_vsock/virtio_transport.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
+index 9dae54698737..15eb5d3d4750 100644
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -634,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
+ flush_work(&vsock->event_work);
+ flush_work(&vsock->send_pkt_work);
+
++ /* Reset all connected sockets when the device disappear */
++ vsock_for_each_connected_socket(virtio_vsock_reset_sock);
++
+ vdev->config->reset(vdev);
+
+ mutex_lock(&vsock->rx_lock);
+--
+2.16.4
+
diff --git a/patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch b/patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch
new file mode 100644
index 0000000000..7683027899
--- /dev/null
+++ b/patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch
@@ -0,0 +1,85 @@
+From cb49a87b2a4edb469e4d295eca4b1d106f64083e Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:17 +0300
+Git-commit: cb49a87b2a4edb469e4d295eca4b1d106f64083e
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 1/8] btrfs: Factor out common delayed refs init code
+
+The majority of the init code for struct btrfs_delayed_ref_node is
+duplicated in add_delayed_data_ref and add_delayed_tree_ref. Factor out
+the common bits into init_delayed_ref_common. This function is going to
+be used in future patches to clean that up. No functional changes.
+
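+A sketch of the intended caller-side effect (simplified; only a subset
+of the initialized fields shown):
+
+	/* before: every caller open-coded the node setup */
+	refcount_set(&ref->refs, 1);
+	ref->bytenr = bytenr;
+	ref->num_bytes = num_bytes;
+	/* ...more duplicated init... */
+
+	/* after: one shared helper */
+	init_delayed_ref_common(fs_info, ref, bytenr, num_bytes,
+				ref_root, action, ref_type);
+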
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 51 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 51 insertions(+)
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 4fb041e14742..a0dc255792c7 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -644,6 +644,57 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ return head_ref;
+ }
+
++/*
++ * init_delayed_ref_common - Initialize the structure which represents a
++ * modification to an extent.
++ *
++ * @fs_info: Internal to the mounted filesystem mount structure.
++ *
++ * @ref: The structure which is going to be initialized.
++ *
++ * @bytenr: The logical address of the extent for which a modification is
++ * going to be recorded.
++ *
++ * @num_bytes: Size of the extent whose modification is being recorded.
++ *
++ * @ref_root: The id of the root where this modification has originated, this
++ * can be either one of the well-known metadata trees or the
++ * subvolume id which references this extent.
++ *
++ * @action: Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
++ * BTRFS_ADD_DELAYED_EXTENT
++ *
++ * @ref_type: Holds the type of the extent which is being recorded, can be
++ * one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
++ * when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
++ * BTRFS_EXTENT_DATA_REF_KEY when recording data extent
++ */
++static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
++ struct btrfs_delayed_ref_node *ref,
++ u64 bytenr, u64 num_bytes, u64 ref_root,
++ int action, u8 ref_type)
++{
++ u64 seq = 0;
++
++ if (action == BTRFS_ADD_DELAYED_EXTENT)
++ action = BTRFS_ADD_DELAYED_REF;
++
++ if (is_fstree(ref_root))
++ seq = atomic64_read(&fs_info->tree_mod_seq);
++
++ refcount_set(&ref->refs, 1);
++ ref->bytenr = bytenr;
++ ref->num_bytes = num_bytes;
++ ref->ref_mod = 1;
++ ref->action = action;
++ ref->is_head = 0;
++ ref->in_tree = 1;
++ ref->seq = seq;
++ ref->type = ref_type;
++ RB_CLEAR_NODE(&ref->ref_node);
++ INIT_LIST_HEAD(&ref->add_list);
++}
++
+ /*
+ * helper to insert a delayed tree ref into the rbtree.
+ */
+--
+2.21.0
+
diff --git a/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch b/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
new file mode 100644
index 0000000000..ff33d5ddc5
--- /dev/null
+++ b/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
@@ -0,0 +1,404 @@
+From 0e0adbcfdc908684317c99a9bf5e13383f03b7ec Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Git-commit: 0e0adbcfdc908684317c99a9bf5e13383f03b7ec
+Patch-mainline: v4.15
+References: bsc#1134813
+Date: Thu, 19 Oct 2017 14:16:00 -0400
+Subject: [PATCH] btrfs: track refs in a rb_tree instead of a list
+
+If we get a significant number of delayed refs for a single block (think
+modifying multiple snapshots) we can end up spending an ungodly amount
+of time looping through all of the entries trying to see if they can be
+merged. This is because we only add them to a list, so we have O(2n)
+for every ref head. This doesn't make any sense as we likely have refs
+for different roots, and so they cannot be merged. Tracking them in a
+tree will allow us to break as soon as we hit an entry that doesn't
+match, making our worst case O(n).
+
+With this we can also merge entries more easily. Before we had to hope
+that matching refs were on the ends of our list, but with the tree we
+can search down to exact matches and merge them at insert time.
+
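+A condensed sketch of the insert-time merge this enables (names from
+this patch): tree_insert() returns the colliding entry instead of
+linking a duplicate, so the caller can merge on the spot.
+
+	exist = tree_insert(&href->ref_tree, ref);
+	if (!exist)
+		goto inserted;	/* no matching ref, plain insert */
+	/* a matching ref exists: fold the new mod into it */
+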
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/backref.c | 5 +-
+ fs/btrfs/delayed-ref.c | 108 +++++++++++++++++++++--------------------
+ fs/btrfs/delayed-ref.h | 5 +-
+ fs/btrfs/disk-io.c | 10 ++--
+ fs/btrfs/extent-tree.c | 21 +++++---
+ 5 files changed, 82 insertions(+), 67 deletions(-)
+
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 523d2dba7745..7d0dc100a09a 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -773,6 +773,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ struct btrfs_key key;
+ struct btrfs_key tmp_op_key;
+ struct btrfs_key *op_key = NULL;
++ struct rb_node *n;
+ int count;
+ int ret = 0;
+
+@@ -782,7 +783,9 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ }
+
+ spin_lock(&head->lock);
+- list_for_each_entry(node, &head->ref_list, list) {
++ for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
++ node = rb_entry(n, struct btrfs_delayed_ref_node,
++ ref_node);
+ if (node->seq > seq)
+ continue;
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 8c7d7db01f7a..83be8f9fd906 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -143,6 +143,34 @@ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
+ return NULL;
+ }
+
++static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
++ struct btrfs_delayed_ref_node *ins)
++{
++ struct rb_node **p = &root->rb_node;
++ struct rb_node *node = &ins->ref_node;
++ struct rb_node *parent_node = NULL;
++ struct btrfs_delayed_ref_node *entry;
++
++ while (*p) {
++ int comp;
++
++ parent_node = *p;
++ entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
++ ref_node);
++ comp = comp_refs(ins, entry, true);
++ if (comp < 0)
++ p = &(*p)->rb_left;
++ else if (comp > 0)
++ p = &(*p)->rb_right;
++ else
++ return entry;
++ }
++
++ rb_link_node(node, parent_node, p);
++ rb_insert_color(node, root);
++ return NULL;
++}
++
+ /*
+ * find an head entry based on bytenr. This returns the delayed ref
+ * head if it was able to find one, or NULL if nothing was in that spot.
+@@ -212,7 +240,8 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_node *ref)
+ {
+ assert_spin_locked(&head->lock);
+- list_del(&ref->list);
++ rb_erase(&ref->ref_node, &head->ref_tree);
++ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ ref->in_tree = 0;
+@@ -229,24 +258,18 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
+ u64 seq)
+ {
+ struct btrfs_delayed_ref_node *next;
++ struct rb_node *node = rb_next(&ref->ref_node);
+ bool done = false;
+
+- next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+- list);
+- while (!done && &next->list != &head->ref_list) {
++ while (!done && node) {
+ int mod;
+- struct btrfs_delayed_ref_node *next2;
+-
+- next2 = list_next_entry(next, list);
+-
+- if (next == ref)
+- goto next;
+
++ next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
++ node = rb_next(node);
+ if (seq && next->seq >= seq)
+- goto next;
+-
++ break;
+ if (comp_refs(ref, next, false))
+- goto next;
++ break;
+
+ if (ref->action == next->action) {
+ mod = next->ref_mod;
+@@ -270,8 +293,6 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
+ WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
+ ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
+ }
+-next:
+- next = next2;
+ }
+
+ return done;
+@@ -283,11 +304,12 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head)
+ {
+ struct btrfs_delayed_ref_node *ref;
++ struct rb_node *node;
+ u64 seq = 0;
+
+ assert_spin_locked(&head->lock);
+
+- if (list_empty(&head->ref_list))
++ if (RB_EMPTY_ROOT(&head->ref_tree))
+ return;
+
+ /* We don't have too many refs to merge for data. */
+@@ -304,22 +326,13 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+ }
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+
+- ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+- list);
+- while (&ref->list != &head->ref_list) {
++again:
++ for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
++ ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+ if (seq && ref->seq >= seq)
+- goto next;
+-
+- if (merge_ref(trans, delayed_refs, head, ref, seq)) {
+- if (list_empty(&head->ref_list))
+- break;
+- ref = list_first_entry(&head->ref_list,
+- struct btrfs_delayed_ref_node,
+- list);
+ continue;
+- }
+-next:
+- ref = list_next_entry(ref, list);
++ if (merge_ref(trans, delayed_refs, head, ref, seq))
++ goto again;
+ }
+ }
+
+@@ -402,25 +415,19 @@ btrfs_select_ref_head(struct btrfs_trans_handle *trans)
+ * Return 0 for insert.
+ * Return >0 for merge.
+ */
+-static int
+-add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
+- struct btrfs_delayed_ref_root *root,
+- struct btrfs_delayed_ref_head *href,
+- struct btrfs_delayed_ref_node *ref)
++static int insert_delayed_ref(struct btrfs_trans_handle *trans,
++ struct btrfs_delayed_ref_root *root,
++ struct btrfs_delayed_ref_head *href,
++ struct btrfs_delayed_ref_node *ref)
+ {
+ struct btrfs_delayed_ref_node *exist;
+ int mod;
+ int ret = 0;
+
+ spin_lock(&href->lock);
+- /* Check whether we can merge the tail node with ref */
+- if (list_empty(&href->ref_list))
+- goto add_tail;
+- exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
+- list);
+- /* No need to compare bytenr nor is_head */
+- if (comp_refs(exist, ref, true))
+- goto add_tail;
++ exist = tree_insert(&href->ref_tree, ref);
++ if (!exist)
++ goto inserted;
+
+ /* Now we are sure we can merge */
+ ret = 1;
+@@ -451,9 +458,7 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
+ drop_delayed_ref(trans, root, href, exist);
+ spin_unlock(&href->lock);
+ return ret;
+-
+-add_tail:
+- list_add_tail(&ref->list, &href->ref_list);
++inserted:
+ if (ref->action == BTRFS_ADD_DELAYED_REF)
+ list_add_tail(&ref->add_list, &href->ref_add_list);
+ atomic_inc(&root->num_entries);
+@@ -593,7 +598,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+ head_ref->ref_mod = count_mod;
+ head_ref->must_insert_reserved = must_insert_reserved;
+ head_ref->is_data = is_data;
+- INIT_LIST_HEAD(&head_ref->ref_list);
++ head_ref->ref_tree = RB_ROOT;
+ INIT_LIST_HEAD(&head_ref->ref_add_list);
+ RB_CLEAR_NODE(&head_ref->href_node);
+ head_ref->processing = 0;
+@@ -685,7 +690,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ ref->is_head = 0;
+ ref->in_tree = 1;
+ ref->seq = seq;
+- INIT_LIST_HEAD(&ref->list);
++ RB_CLEAR_NODE(&ref->ref_node);
+ INIT_LIST_HEAD(&ref->add_list);
+
+ full_ref = btrfs_delayed_node_to_tree_ref(ref);
+@@ -699,7 +704,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+
+ trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
+
+- ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+
+ /*
+ * XXX: memory should be freed at the same level allocated.
+@@ -742,7 +747,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ ref->is_head = 0;
+ ref->in_tree = 1;
+ ref->seq = seq;
+- INIT_LIST_HEAD(&ref->list);
++ RB_CLEAR_NODE(&ref->ref_node);
+ INIT_LIST_HEAD(&ref->add_list);
+
+ full_ref = btrfs_delayed_node_to_data_ref(ref);
+@@ -758,8 +763,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+
+ trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
+
+- ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
+-
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+ if (ret > 0)
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
+ }
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index 1ce11858d727..a43af432f859 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -27,8 +27,7 @@
+ #define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
+
+ struct btrfs_delayed_ref_node {
+- /*data/tree ref use list, stored in ref_head->ref_list. */
+- struct list_head list;
++ struct rb_node ref_node;
+ /*
+ * If action is BTRFS_ADD_DELAYED_REF, also link this node to
+ * ref_head->ref_add_list, then we do not need to iterate the
+@@ -92,7 +91,7 @@ struct btrfs_delayed_ref_head {
+ struct mutex mutex;
+
+ spinlock_t lock;
+- struct list_head ref_list;
++ struct rb_root ref_tree;
+ /* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
+ struct list_head ref_add_list;
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index d1f396f72979..efce9a2fa9be 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4113,7 +4113,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+
+ while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
+ struct btrfs_delayed_ref_head *head;
+- struct btrfs_delayed_ref_node *tmp;
++ struct rb_node *n;
+ bool pin_bytes = false;
+
+ head = rb_entry(node, struct btrfs_delayed_ref_head,
+@@ -4129,10 +4129,12 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ continue;
+ }
+ spin_lock(&head->lock);
+- list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
+- list) {
++ while ((n = rb_first(&head->ref_tree)) != NULL) {
++ ref = rb_entry(n, struct btrfs_delayed_ref_node,
++ ref_node);
+ ref->in_tree = 0;
+- list_del(&ref->list);
++ rb_erase(&ref->ref_node, &head->ref_tree);
++ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ atomic_dec(&delayed_refs->num_entries);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index fc9720e28005..673ac4e01dd0 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2519,7 +2519,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
+ {
+ struct btrfs_delayed_ref_node *ref;
+
+- if (list_empty(&head->ref_list))
++ if (RB_EMPTY_ROOT(&head->ref_tree))
+ return NULL;
+
+ /*
+@@ -2532,8 +2532,8 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
+ return list_first_entry(&head->ref_add_list,
+ struct btrfs_delayed_ref_node, add_list);
+
+- ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+- list);
++ ref = rb_entry(rb_first(&head->ref_tree),
++ struct btrfs_delayed_ref_node, ref_node);
+ ASSERT(list_empty(&ref->add_list));
+ return ref;
+ }
+@@ -2593,7 +2593,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+ spin_unlock(&head->lock);
+ spin_lock(&delayed_refs->lock);
+ spin_lock(&head->lock);
+- if (!list_empty(&head->ref_list) || head->extent_op) {
++ if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+ return 1;
+@@ -2740,7 +2740,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
+
+ actual_count++;
+ ref->in_tree = 0;
+- list_del(&ref->list);
++ rb_erase(&ref->ref_node, &locked_ref->ref_tree);
++ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ /*
+@@ -3138,6 +3139,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ struct btrfs_delayed_data_ref *data_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_transaction *cur_trans;
++ struct rb_node *node;
+ int ret = 0;
+
+ cur_trans = root->fs_info->running_transaction;
+@@ -3170,7 +3172,12 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ spin_unlock(&delayed_refs->lock);
+
+ spin_lock(&head->lock);
+- list_for_each_entry(ref, &head->ref_list, list) {
++ /*
++ * XXX: We should replace this with a proper search function in the
++ * future.
++ */
++ for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
++ ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+ /* If it's a shared ref we know a cross reference exists */
+ if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
+ ret = 1;
+@@ -7141,7 +7148,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
+ goto out_delayed_unlock;
+
+ spin_lock(&head->lock);
+- if (!list_empty(&head->ref_list))
++ if (!RB_EMPTY_ROOT(&head->ref_tree))
+ goto out;
+
+ if (head->extent_op) {
+--
+2.21.0
+
diff --git a/patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch b/patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch
new file mode 100644
index 0000000000..06f8d5fb5f
--- /dev/null
+++ b/patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch
@@ -0,0 +1,33 @@
+From d719a1ff8391101ba5cb0998943dfb3f77d47e7f Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 14 May 2019 12:37:31 +0200
+Subject: drm/ttm: Remove warning about inconsistent mapping information
+Patch-mainline: Never, local cleanup
+References: bnc#1131488
+
+Fixing the issue of bnc#1131488 requires changing a significant amount
+of the fbdev emulation. As the problem is rather cosmetic, we drop
+the warning for now.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+---
+ drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+index 622dab6c4347..90a56c6724b5 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -284,8 +284,6 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
+ struct ttm_buffer_object *bo =
+ (struct ttm_buffer_object *)vma->vm_private_data;
+
+- WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
+-
+ (void)ttm_bo_reference(bo);
+ }
+
+--
+2.21.0
+
diff --git a/patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch b/patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch
new file mode 100644
index 0000000000..c03208f122
--- /dev/null
+++ b/patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch
@@ -0,0 +1,75 @@
+From 646f4dd76fb3ac0d1e8677890522d4c044ee2f06 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:18 +0300
+Git-commit: 646f4dd76fb3ac0d1e8677890522d4c044ee2f06
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 2/8] btrfs: Use init_delayed_ref_common in
+ add_delayed_tree_ref
+
+Use the newly introduced common helper. No functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 35 +++++++++++------------------------
+ 1 file changed, 11 insertions(+), 24 deletions(-)
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index a0dc255792c7..1c27d3322198 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -708,38 +708,25 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ {
+ struct btrfs_delayed_tree_ref *full_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+- u64 seq = 0;
++ u8 ref_type;
+ int ret;
+
+- if (action == BTRFS_ADD_DELAYED_EXTENT)
+- action = BTRFS_ADD_DELAYED_REF;
+-
+- if (is_fstree(ref_root))
+- seq = atomic64_read(&fs_info->tree_mod_seq);
+ delayed_refs = &trans->transaction->delayed_refs;
+-
+- /* first set the basic ref node struct up */
+- refcount_set(&ref->refs, 1);
+- ref->bytenr = bytenr;
+- ref->num_bytes = num_bytes;
+- ref->ref_mod = 1;
+- ref->action = action;
+- ref->is_head = 0;
+- ref->in_tree = 1;
+- ref->seq = seq;
+- RB_CLEAR_NODE(&ref->ref_node);
+- INIT_LIST_HEAD(&ref->add_list);
+-
+ full_ref = btrfs_delayed_node_to_tree_ref(ref);
+- full_ref->parent = parent;
+- full_ref->root = ref_root;
+ if (parent)
+- ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
++ ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
+ else
+- ref->type = BTRFS_TREE_BLOCK_REF_KEY;
++ ref_type = BTRFS_TREE_BLOCK_REF_KEY;
++
++ init_delayed_ref_common(fs_info, ref, bytenr, num_bytes, ref_root,
++ action, ref_type);
++ full_ref->root = ref_root;
++ full_ref->parent = parent;
+ full_ref->level = level;
+
+- trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
++ trace_add_delayed_tree_ref(fs_info, ref, full_ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+
+--
+2.21.0
+
diff --git a/patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch b/patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch
new file mode 100644
index 0000000000..8396e83151
--- /dev/null
+++ b/patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch
@@ -0,0 +1,73 @@
+From c812c8a857a00acae78341d5d4702eb8d7d02661 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:19 +0300
+Git-commit: c812c8a857a00acae78341d5d4702eb8d7d02661
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 3/8] btrfs: Use init_delayed_ref_common in
+ add_delayed_data_ref
+
+Use the newly introduced helper and remove the duplicate code. No
+functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 33 ++++++++++-----------------------
+ 1 file changed, 10 insertions(+), 23 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -768,41 +768,28 @@ add_delayed_data_ref(struct btrfs_fs_inf
+ {
+ struct btrfs_delayed_data_ref *full_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+- u64 seq = 0;
++ u8 ref_type;
+ int ret;
+
+- if (action == BTRFS_ADD_DELAYED_EXTENT)
+- action = BTRFS_ADD_DELAYED_REF;
+-
+ delayed_refs = &trans->transaction->delayed_refs;
+
+- if (is_fstree(ref_root))
+- seq = atomic64_read(&fs_info->tree_mod_seq);
+-
+- /* first set the basic ref node struct up */
+- refcount_set(&ref->refs, 1);
+- ref->bytenr = bytenr;
+- ref->num_bytes = num_bytes;
+- ref->ref_mod = 1;
+- ref->action = action;
+- ref->is_head = 0;
+- ref->in_tree = 1;
+- ref->seq = seq;
+- RB_CLEAR_NODE(&ref->ref_node);
+- INIT_LIST_HEAD(&ref->add_list);
+
+ full_ref = btrfs_delayed_node_to_data_ref(ref);
+- full_ref->parent = parent;
+- full_ref->root = ref_root;
+ if (parent)
+- ref->type = BTRFS_SHARED_DATA_REF_KEY;
++ ref_type = BTRFS_SHARED_DATA_REF_KEY;
+ else
+- ref->type = BTRFS_EXTENT_DATA_REF_KEY;
++ ref_type = BTRFS_EXTENT_DATA_REF_KEY;
+
++ init_delayed_ref_common(fs_info, ref, bytenr, num_bytes,
++ ref_root, action, ref_type);
++ full_ref->root = ref_root;
++ full_ref->parent = parent;
+ full_ref->objectid = owner;
+ full_ref->offset = offset;
+
+- trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
++ trace_add_delayed_data_ref(fs_info, ref, full_ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+ if (ret > 0)
diff --git a/patches.suse/0003-x86-idle-Control-Indirect-Branch-Speculation-in-idle.patch b/patches.suse/0003-x86-idle-Control-Indirect-Branch-Speculation-in-idle.patch
index bc9f09616d..14fc7dac62 100644
--- a/patches.suse/0003-x86-idle-Control-Indirect-Branch-Speculation-in-idle.patch
+++ b/patches.suse/0003-x86-idle-Control-Indirect-Branch-Speculation-in-idle.patch
@@ -20,21 +20,13 @@ Originally-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
---
- arch/x86/include/asm/mwait.h | 14 ++++++++++++++
+ arch/x86/include/asm/mwait.h | 13 +++++++++++++
arch/x86/kernel/process.c | 14 ++++++++++++++
- 2 files changed, 28 insertions(+)
+ 2 files changed, 27 insertions(+)
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
-@@ -5,6 +5,7 @@
- #include <linux/sched/idle.h>
-
- #include <asm/cpufeature.h>
-+#include <asm/nospec-branch.h>
-
- #define MWAIT_SUBSTATE_MASK 0xf
- #define MWAIT_CSTATE_MASK 0xf
-@@ -105,7 +106,20 @@ static inline void mwait_idle_with_hints
+@@ -112,7 +112,20 @@ static inline void mwait_idle_with_hints
mb();
}
@@ -57,7 +49,7 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
}
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
-@@ -465,6 +465,20 @@ static __cpuidle void mwait_idle(void)
+@@ -674,6 +674,20 @@ static __cpuidle void mwait_idle(void)
mb(); /* quirk */
}
diff --git a/patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch b/patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch
new file mode 100644
index 0000000000..b154cc6d0a
--- /dev/null
+++ b/patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch
@@ -0,0 +1,128 @@
+From 70d640004ab5c2597084f6463dd39b36f4f026f8 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:20 +0300
+Git-commit: 70d640004ab5c2597084f6463dd39b36f4f026f8
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 4/8] btrfs: Open-code add_delayed_tree_ref
+
+Now that the initialization part and the critical section code have been
+split, it's a lot easier to open code add_delayed_tree_ref. Do so in the
+following manner:
+
+1. The common init code is put immediately after the memory to be
+   initialized is allocated, followed by the ref-specific member
+   initialization.
+
+2. The only piece of code that remains in the critical section is the
+   insert_delayed_ref call.
+
+3. Tracing and memory-freeing code is put outside of the critical
+   section as well.
+
+The only real change here is an overall shorter critical section when
+dealing with delayed tree refs. From a functional point of view, the
+code is unchanged.
+
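+A sketch of the resulting control flow (simplified; argument lists
+trimmed):
+
+	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
+	init_delayed_ref_common(fs_info, &ref->node, ...);	/* unlocked */
+
+	spin_lock(&delayed_refs->lock);
+	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+	spin_unlock(&delayed_refs->lock);
+
+	trace_add_delayed_tree_ref(...);			/* unlocked */
+	if (ret > 0)
+		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+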
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 65 +++++++++++++++----------------------------------
+ 1 file changed, 20 insertions(+), 45 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -713,49 +713,6 @@ static void init_delayed_ref_common(stru
+ }
+
+ /*
+- * helper to insert a delayed tree ref into the rbtree.
+- */
+-static noinline void
+-add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+- struct btrfs_trans_handle *trans,
+- struct btrfs_delayed_ref_head *head_ref,
+- struct btrfs_delayed_ref_node *ref, u64 bytenr,
+- u64 num_bytes, u64 parent, u64 ref_root, int level,
+- int action)
+-{
+- struct btrfs_delayed_tree_ref *full_ref;
+- struct btrfs_delayed_ref_root *delayed_refs;
+- u8 ref_type;
+- int ret;
+-
+- delayed_refs = &trans->transaction->delayed_refs;
+- full_ref = btrfs_delayed_node_to_tree_ref(ref);
+- if (parent)
+- ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
+- else
+- ref_type = BTRFS_TREE_BLOCK_REF_KEY;
+-
+- init_delayed_ref_common(fs_info, ref, bytenr, num_bytes, ref_root,
+- action, ref_type);
+- full_ref->root = ref_root;
+- full_ref->parent = parent;
+- full_ref->level = level;
+-
+- trace_add_delayed_tree_ref(fs_info, ref, full_ref,
+- action == BTRFS_ADD_DELAYED_EXTENT ?
+- BTRFS_ADD_DELAYED_REF : action);
+-
+- ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+-
+- /*
+- * XXX: memory should be freed at the same level allocated.
+- * But bad practice is anywhere... Follow it now. Need cleanup.
+- */
+- if (ret > 0)
+- kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
+-}
+-
+-/*
+ * helper to insert a delayed data ref into the rbtree.
+ */
+ static noinline void
+@@ -814,12 +771,24 @@ int btrfs_add_delayed_tree_ref(struct bt
+ struct btrfs_qgroup_extent_record *record = NULL;
+ int qrecord_inserted;
+ int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
++ int ret;
++ u8 ref_type;
+
+ BUG_ON(extent_op && extent_op->is_data);
+ ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+
++ if (parent)
++ ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
++ else
++ ref_type = BTRFS_TREE_BLOCK_REF_KEY;
++ init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
++ ref_root, action, ref_type);
++ ref->root = ref_root;
++ ref->parent = parent;
++ ref->level = level;
++
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+ if (!head_ref)
+ goto free_ref;
+@@ -845,10 +814,16 @@ int btrfs_add_delayed_tree_ref(struct bt
+ is_system, &qrecord_inserted,
+ old_ref_mod, new_ref_mod);
+
+- add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+- num_bytes, parent, ref_root, level, action);
++
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ spin_unlock(&delayed_refs->lock);
+
++ trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
++ if (ret > 0)
++ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
++
+ if (qrecord_inserted)
+ return btrfs_qgroup_trace_extent_post(fs_info, record);
+ return 0;
diff --git a/patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch b/patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch
new file mode 100644
index 0000000000..4bb5260040
--- /dev/null
+++ b/patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch
@@ -0,0 +1,125 @@
+From cd7f9699b113434467434580ebb8d9b328152fb8 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:21 +0300
+Git-commit: cd7f9699b113434467434580ebb8d9b328152fb8
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 5/8] btrfs: Open-code add_delayed_data_ref
+
+Now that the initialization part and the critical section code have been
+split, it's a lot easier to open code add_delayed_data_ref. Do so in the
+following manner:
+
+1. The common init function is put immediately after the memory to be
+   initialized is allocated, followed by the data-ref-specific
+   initialization.
+
+2. The only piece of code that remains in the critical section is the
+   insert_delayed_ref call.
+
+3. Tracing and memory-freeing code is moved outside of the critical
+   section.
+
+No functional changes, just an overall shorter critical section.
+
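+Relative to the tree-ref case, the only extra work is the data-specific
+member setup right after the common init (sketch):
+
+	ref->objectid = owner;
+	ref->offset = offset;
+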
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 65 +++++++++++++++----------------------------------
+ 1 file changed, 21 insertions(+), 44 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -713,47 +713,6 @@ static void init_delayed_ref_common(stru
+ }
+
+ /*
+- * helper to insert a delayed data ref into the rbtree.
+- */
+-static noinline void
+-add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+- struct btrfs_trans_handle *trans,
+- struct btrfs_delayed_ref_head *head_ref,
+- struct btrfs_delayed_ref_node *ref, u64 bytenr,
+- u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
+- u64 offset, int action)
+-{
+- struct btrfs_delayed_data_ref *full_ref;
+- struct btrfs_delayed_ref_root *delayed_refs;
+- u8 ref_type;
+- int ret;
+-
+- delayed_refs = &trans->transaction->delayed_refs;
+-
+-
+- full_ref = btrfs_delayed_node_to_data_ref(ref);
+- if (parent)
+- ref_type = BTRFS_SHARED_DATA_REF_KEY;
+- else
+- ref_type = BTRFS_EXTENT_DATA_REF_KEY;
+-
+- init_delayed_ref_common(fs_info, ref, bytenr, num_bytes,
+- ref_root, action, ref_type);
+- full_ref->root = ref_root;
+- full_ref->parent = parent;
+- full_ref->objectid = owner;
+- full_ref->offset = offset;
+-
+- trace_add_delayed_data_ref(fs_info, ref, full_ref,
+- action == BTRFS_ADD_DELAYED_EXTENT ?
+- BTRFS_ADD_DELAYED_REF : action);
+-
+- ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+- if (ret > 0)
+- kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
+-}
+-
+-/*
+ * add a delayed tree ref. This does all of the accounting required
+ * to make sure the delayed ref is eventually processed before this
+ * transaction commits.
+@@ -851,11 +810,25 @@ int btrfs_add_delayed_data_ref(struct bt
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+ int qrecord_inserted;
++ int ret;
++ u8 ref_type;
+
+ ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+
++ if (parent)
++ ref_type = BTRFS_SHARED_DATA_REF_KEY;
++ else
++ ref_type = BTRFS_EXTENT_DATA_REF_KEY;
++ init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
++ ref_root, action, ref_type);
++ ref->root = ref_root;
++ ref->parent = parent;
++ ref->objectid = owner;
++ ref->offset = offset;
++
++
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+ if (!head_ref) {
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+@@ -887,11 +860,15 @@ int btrfs_add_delayed_data_ref(struct bt
+ action, 1, 0, &qrecord_inserted,
+ old_ref_mod, new_ref_mod);
+
+- add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+- num_bytes, parent, ref_root, owner, offset,
+- action);
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ spin_unlock(&delayed_refs->lock);
+
++ trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
++ if (ret > 0)
++ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
++
+ if (qrecord_inserted)
+ return btrfs_qgroup_trace_extent_post(fs_info, record);
+ return 0;
diff --git a/patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch b/patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch
new file mode 100644
index 0000000000..1593956fc0
--- /dev/null
+++ b/patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch
@@ -0,0 +1,102 @@
+From a2e569b3f2b138f2c25b4598cf4b18af8af39abd Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:22 +0300
+Git-commit: a2e569b3f2b138f2c25b4598cf4b18af8af39abd
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 6/8] btrfs: Introduce init_delayed_ref_head
+
+add_delayed_ref_head implements the logic both to initialize a head_ref
+structure and to perform the necessary operations to add it to the
+delayed ref machinery. This has resulted in a very cumbersome interface
+with loads of parameters and code which, at first glance, looks very
+unwieldy. Begin untangling it by first extracting the initialization-only
+code into its own function. It's a more or less verbatim copy of the
+first part of add_delayed_ref_head.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 65 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 65 insertions(+)
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 3fa8ea5cbbc6..227094efd050 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -526,6 +526,71 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
+ spin_unlock(&existing->lock);
+ }
+
++static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
++ struct btrfs_qgroup_extent_record *qrecord,
++ u64 bytenr, u64 num_bytes, u64 ref_root,
++ u64 reserved, int action, bool is_data,
++ bool is_system)
++{
++ int count_mod = 1;
++ int must_insert_reserved = 0;
++
++ /* If reserved is provided, it must be a data extent. */
++ BUG_ON(!is_data && reserved);
++
++ /*
++ * The head node stores the sum of all the mods, so dropping a ref
++ * should drop the sum in the head node by one.
++ */
++ if (action == BTRFS_UPDATE_DELAYED_HEAD)
++ count_mod = 0;
++ else if (action == BTRFS_DROP_DELAYED_REF)
++ count_mod = -1;
++
++ /*
++ * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
++ * accounting when the extent is finally added, or if a later
++ * modification deletes the delayed ref without ever inserting the
++ * extent into the extent allocation tree. ref->must_insert_reserved
++ * is the flag used to record that accounting mods are required.
++ *
++ * Once we record must_insert_reserved, switch the action to
++ * BTRFS_ADD_DELAYED_REF because other special casing is not required.
++ */
++ if (action == BTRFS_ADD_DELAYED_EXTENT)
++ must_insert_reserved = 1;
++ else
++ must_insert_reserved = 0;
++
++ refcount_set(&head_ref->refs, 1);
++ head_ref->bytenr = bytenr;
++ head_ref->num_bytes = num_bytes;
++ head_ref->ref_mod = count_mod;
++ head_ref->must_insert_reserved = must_insert_reserved;
++ head_ref->is_data = is_data;
++ head_ref->is_system = is_system;
++ head_ref->ref_tree = RB_ROOT;
++ INIT_LIST_HEAD(&head_ref->ref_add_list);
++ RB_CLEAR_NODE(&head_ref->href_node);
++ head_ref->processing = 0;
++ head_ref->total_ref_mod = count_mod;
++ head_ref->qgroup_reserved = 0;
++ head_ref->qgroup_ref_root = 0;
++ spin_lock_init(&head_ref->lock);
++ mutex_init(&head_ref->mutex);
++
++ if (qrecord) {
++ if (ref_root && reserved) {
++ head_ref->qgroup_ref_root = ref_root;
++ head_ref->qgroup_reserved = reserved;
++ }
++
++ qrecord->bytenr = bytenr;
++ qrecord->num_bytes = num_bytes;
++ qrecord->old_roots = NULL;
++ }
++}
++
+ /*
+ * helper function to actually insert a head node into the rbtree.
+ * this does all the dirty work in terms of maintaining the correct
+--
+2.21.0
+
diff --git a/patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch b/patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch
new file mode 100644
index 0000000000..d41cf73dd5
--- /dev/null
+++ b/patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch
@@ -0,0 +1,102 @@
+From eb86ec73b968b2895ffede893b33bf49bbc9bf5c Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:23 +0300
+Git-commit: eb86ec73b968b2895ffede893b33bf49bbc9bf5c
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 7/8] btrfs: Use init_delayed_ref_head in add_delayed_ref_head
+
+Use the newly introduced function when initialising the head_ref in
+add_delayed_ref_head. No functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 61 +++----------------------------------------------
+ 1 file changed, 4 insertions(+), 57 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -625,69 +625,16 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ {
+ struct btrfs_delayed_ref_head *existing;
+ struct btrfs_delayed_ref_root *delayed_refs;
+- int count_mod = 1;
+- int must_insert_reserved = 0;
+ int qrecord_inserted = 0;
+
+- /* If reserved is provided, it must be a data extent. */
+- BUG_ON(!is_data && reserved);
+-
+- /*
+- * the head node stores the sum of all the mods, so dropping a ref
+- * should drop the sum in the head node by one.
+- */
+- if (action == BTRFS_UPDATE_DELAYED_HEAD)
+- count_mod = 0;
+- else if (action == BTRFS_DROP_DELAYED_REF)
+- count_mod = -1;
+-
+- /*
+- * BTRFS_ADD_DELAYED_EXTENT means that we need to update
+- * the reserved accounting when the extent is finally added, or
+- * if a later modification deletes the delayed ref without ever
+- * inserting the extent into the extent allocation tree.
+- * ref->must_insert_reserved is the flag used to record
+- * that accounting mods are required.
+- *
+- * Once we record must_insert_reserved, switch the action to
+- * BTRFS_ADD_DELAYED_REF because other special casing is not required.
+- */
+- if (action == BTRFS_ADD_DELAYED_EXTENT)
+- must_insert_reserved = 1;
+- else
+- must_insert_reserved = 0;
+-
+ delayed_refs = &trans->transaction->delayed_refs;
+
+- refcount_set(&head_ref->refs, 1);
+- head_ref->bytenr = bytenr;
+- head_ref->num_bytes = num_bytes;
+- head_ref->ref_mod = count_mod;
+- head_ref->must_insert_reserved = must_insert_reserved;
+- head_ref->is_data = is_data;
+- head_ref->is_system = is_system;
+- head_ref->ref_tree = RB_ROOT;
+- INIT_LIST_HEAD(&head_ref->ref_add_list);
+- RB_CLEAR_NODE(&head_ref->href_node);
+- head_ref->processing = 0;
+- head_ref->total_ref_mod = count_mod;
+- head_ref->qgroup_reserved = 0;
+- head_ref->qgroup_ref_root = 0;
+- spin_lock_init(&head_ref->lock);
+- mutex_init(&head_ref->mutex);
++ init_delayed_ref_head(head_ref, qrecord, bytenr, num_bytes, ref_root,
++ reserved, action, is_data, is_system);
+
+ /* Record qgroup extent info if provided */
+ if (qrecord) {
+- if (ref_root && reserved) {
+- head_ref->qgroup_ref_root = ref_root;
+- head_ref->qgroup_reserved = reserved;
+- }
+-
+- qrecord->bytenr = bytenr;
+- qrecord->num_bytes = num_bytes;
+- qrecord->old_roots = NULL;
+-
+- if(btrfs_qgroup_trace_extent_nolock(fs_info,
++ if (btrfs_qgroup_trace_extent_nolock(fs_info,
+ delayed_refs, qrecord))
+ kfree(qrecord);
+ else
+@@ -712,7 +659,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ } else {
+ if (old_ref_mod)
+ *old_ref_mod = 0;
+- if (is_data && count_mod < 0)
++ if (is_data && head_ref->ref_mod < 0)
+ delayed_refs->pending_csums += num_bytes;
+ delayed_refs->num_heads++;
+ delayed_refs->num_heads_ready++;
diff --git a/patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch b/patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch
new file mode 100644
index 0000000000..7620fc154c
--- /dev/null
+++ b/patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch
@@ -0,0 +1,177 @@
+From 2335efafa63f0c675ebb4f8908fff9e972fb8a58 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:24 +0300
+Git-commit: 2335efafa63f0c675ebb4f8908fff9e972fb8a58
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 8/8] btrfs: split delayed ref head initialization and addition
+
+add_delayed_ref_head really performed two independent operations -
+initialising the ref head and adding it to a list. Now that the init
+part is in a separate function, let's complete the separation between
+both operations. This results in a much simpler interface for
+add_delayed_ref_head, since the function now deals solely with either
+adding the newly initialised delayed ref head or merging it into an
+existing delayed ref head. It also results in a vastly simplified
+function signature, since 5 arguments are dropped. The only other
+thing worth mentioning is that due to this split the WARN_ON catching
+re-initialisation of an existing head had to be adjusted: the
+condition is extended such that
+
+ qrecord && head_ref->qgroup_ref_root && head_ref->qgroup_reserved
+
+is added. This is done because the two qgroup_* prefixed members are
+set only if both ref_root and reserved are passed. Functionally it is
+equivalent to the old WARN_ON and allows removing the two arguments
+from add_delayed_ref_head.
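+
+An abbreviated caller-side sketch of the resulting split (a
+hand-written illustration, not one of the hunks below; argument
+lists follow this tree):
+
+    /* 1) initialise the head; no locking required yet */
+    init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
+                          ref_root, reserved, action, is_data,
+                          is_system);
+    head_ref->extent_op = extent_op;
+
+    /* 2) insert it, or merge into an existing head, under the lock */
+    spin_lock(&delayed_refs->lock);
+    head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+                                    action, &qrecord_inserted,
+                                    old_ref_mod, new_ref_mod);
+    spin_unlock(&delayed_refs->lock);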
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 42 ++++++++++++++++++++----------------------
+ 1 file changed, 20 insertions(+), 22 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -617,9 +617,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_qgroup_extent_record *qrecord,
+- u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
+- int action, int is_data, int is_system,
+- int *qrecord_inserted_ret,
++ int action, int *qrecord_inserted_ret,
+ int *old_ref_mod, int *new_ref_mod)
+
+ {
+@@ -629,9 +627,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+
+ delayed_refs = &trans->transaction->delayed_refs;
+
+- init_delayed_ref_head(head_ref, qrecord, bytenr, num_bytes, ref_root,
+- reserved, action, is_data, is_system);
+-
+ /* Record qgroup extent info if provided */
+ if (qrecord) {
+ if (btrfs_qgroup_trace_extent_nolock(fs_info,
+@@ -646,7 +641,9 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ existing = htree_insert(&delayed_refs->href_root,
+ &head_ref->href_node);
+ if (existing) {
+- WARN_ON(ref_root && reserved && existing->qgroup_ref_root
++ WARN_ON(qrecord && head_ref->qgroup_ref_root
++ && head_ref->qgroup_reserved
++ && existing->qgroup_ref_root
+ && existing->qgroup_reserved);
+ update_existing_head_ref(delayed_refs, existing, head_ref,
+ old_ref_mod);
+@@ -659,8 +656,8 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ } else {
+ if (old_ref_mod)
+ *old_ref_mod = 0;
+- if (is_data && head_ref->ref_mod < 0)
+- delayed_refs->pending_csums += num_bytes;
++ if (head_ref->is_data && head_ref->ref_mod < 0)
++ delayed_refs->pending_csums += head_ref->num_bytes;
+ delayed_refs->num_heads++;
+ delayed_refs->num_heads_ready++;
+ atomic_inc(&delayed_refs->num_entries);
+@@ -670,6 +667,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ *qrecord_inserted_ret = qrecord_inserted;
+ if (new_ref_mod)
+ *new_ref_mod = head_ref->total_ref_mod;
++
+ return head_ref;
+ }
+
+@@ -741,7 +739,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+ int qrecord_inserted;
+- int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
++ bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+ int ret;
+ u8 ref_type;
+
+@@ -771,6 +769,8 @@ int btrfs_add_delayed_tree_ref(struct bt
+ goto free_head_ref;
+ }
+
++ init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
++ ref_root, 0, action, false, is_system);
+ head_ref->extent_op = extent_op;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+@@ -781,8 +781,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ * the spin lock
+ */
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+- bytenr, num_bytes, 0, 0, action, 0,
+- is_system, &qrecord_inserted,
++ action, &qrecord_inserted,
+ old_ref_mod, new_ref_mod);
+
+
+@@ -858,6 +857,8 @@ int btrfs_add_delayed_data_ref(struct bt
+ }
+ }
+
++ init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
++ reserved, action, true, false);
+ head_ref->extent_op = NULL;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+@@ -868,8 +869,7 @@ int btrfs_add_delayed_data_ref(struct bt
+ * the spin lock
+ */
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+- bytenr, num_bytes, ref_root, reserved,
+- action, 1, 0, &qrecord_inserted,
++ action, &qrecord_inserted,
+ old_ref_mod, new_ref_mod);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+@@ -898,19 +898,17 @@ int btrfs_add_delayed_extent_op(struct b
+ if (!head_ref)
+ return -ENOMEM;
+
++ init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
++ BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
++ false);
+ head_ref->extent_op = extent_op;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+ spin_lock(&delayed_refs->lock);
+
+- /*
+- * extent_ops just modify the flags of an extent and they don't result
+- * in ref count changes, hence it's safe to pass false/0 for is_system
+- * argument
+- */
+- add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
+- num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
+- extent_op->is_data, 0, NULL, NULL, NULL);
++ add_delayed_ref_head(fs_info, trans, head_ref, NULL,
++ BTRFS_UPDATE_DELAYED_HEAD,
++ NULL, NULL, NULL);
+
+ spin_unlock(&delayed_refs->lock);
+ return 0;
diff --git a/patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch b/patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch
index 16d43c06b9..d676081c42 100644
--- a/patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch
+++ b/patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch
@@ -32,8 +32,8 @@ Acked-by: Nikolay Borisov <nborisov@suse.com>
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
-@@ -3072,7 +3072,11 @@ static noinline int check_delayed_ref(st
- struct btrfs_transaction *cur_trans;
+@@ -3073,7 +3073,11 @@ static noinline int check_delayed_ref(st
+ struct rb_node *node;
int ret = 0;
+ spin_lock(&root->fs_info->trans_lock);
@@ -44,7 +44,7 @@ Acked-by: Nikolay Borisov <nborisov@suse.com>
if (!cur_trans)
return 0;
-@@ -3081,6 +3085,7 @@ static noinline int check_delayed_ref(st
+@@ -3082,6 +3086,7 @@ static noinline int check_delayed_ref(st
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (!head) {
spin_unlock(&delayed_refs->lock);
@@ -52,7 +52,7 @@ Acked-by: Nikolay Borisov <nborisov@suse.com>
return 0;
}
-@@ -3097,6 +3102,7 @@ static noinline int check_delayed_ref(st
+@@ -3098,6 +3103,7 @@ static noinline int check_delayed_ref(st
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
@@ -60,7 +60,7 @@ Acked-by: Nikolay Borisov <nborisov@suse.com>
return -EAGAIN;
}
spin_unlock(&delayed_refs->lock);
-@@ -3124,6 +3130,7 @@ static noinline int check_delayed_ref(st
+@@ -3130,6 +3136,7 @@ static noinline int check_delayed_ref(st
}
spin_unlock(&head->lock);
mutex_unlock(&head->mutex);
diff --git a/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch b/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
index aac087d985..f7f380eaa6 100644
--- a/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
+++ b/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
@@ -55,7 +55,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
-@@ -548,8 +548,10 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -553,8 +553,10 @@ add_delayed_ref_head(struct btrfs_fs_inf
struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
@@ -67,15 +67,15 @@ Signed-off-by: David Sterba <dsterba@suse.com>
{
struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_root *delayed_refs;
-@@ -593,6 +595,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -598,6 +600,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
head_ref->ref_mod = count_mod;
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
+ head_ref->is_system = is_system;
- INIT_LIST_HEAD(&head_ref->ref_list);
+ head_ref->ref_tree = RB_ROOT;
INIT_LIST_HEAD(&head_ref->ref_add_list);
RB_CLEAR_NODE(&head_ref->href_node);
-@@ -781,6 +784,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -785,6 +788,7 @@ int btrfs_add_delayed_tree_ref(struct bt
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
int qrecord_inserted;
@@ -83,7 +83,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
BUG_ON(extent_op && extent_op->is_data);
ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
-@@ -809,8 +813,8 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -813,8 +817,8 @@ int btrfs_add_delayed_tree_ref(struct bt
*/
head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
bytenr, num_bytes, 0, 0, action, 0,
@@ -94,7 +94,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
num_bytes, parent, ref_root, level, action);
-@@ -876,7 +880,7 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -880,7 +884,7 @@ int btrfs_add_delayed_data_ref(struct bt
*/
head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
bytenr, num_bytes, ref_root, reserved,
@@ -103,7 +103,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
old_ref_mod, new_ref_mod);
add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
-@@ -906,9 +910,14 @@ int btrfs_add_delayed_extent_op(struct b
+@@ -910,9 +914,14 @@ int btrfs_add_delayed_extent_op(struct b
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
@@ -121,7 +121,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
return 0;
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
-@@ -140,6 +140,7 @@ struct btrfs_delayed_ref_head {
+@@ -139,6 +139,7 @@ struct btrfs_delayed_ref_head {
*/
unsigned int must_insert_reserved:1;
unsigned int is_data:1;
diff --git a/patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch b/patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch
new file mode 100644
index 0000000000..ed07c65ac5
--- /dev/null
+++ b/patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch
@@ -0,0 +1,32 @@
+From: Hannes Reinecke <hare@suse.de>
+Date: Tue, 14 May 2019 12:36:46 +0200
+Subject: [PATCH] lpfc: validate command in lpfc_sli4_scmd_to_wqidx_distr()
+References: bsc#1129138
+Patch-Mainline: never, SLE15 specific
+
+According to Broadcom, we need to validate the command to avoid
+crashes with blk-mq.
+
+Suggested-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: Hannes Reinecke <hare@suse.com>
+---
+ drivers/scsi/lpfc/lpfc_scsi.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 100a4a5a5b99..d1aab85f5102 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -3932,7 +3932,8 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+ uint32_t tag;
+ uint16_t hwq;
+
+- if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
++ if (cmnd && shost_use_blk_mq(cmnd->device->host) &&
++ cmnd->request && cmnd->request->q) {
+ tag = blk_mq_unique_tag(cmnd->request);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+
+--
+2.16.4
+
diff --git a/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch b/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch
index f1c1f33fc2..1ff8b31a3b 100644
--- a/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch
+++ b/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch
@@ -40,31 +40,30 @@ deadlocks when resolving references.
Acked-by: Jeff Mahoney <jeffm@suse.com>
---
---
- fs/btrfs/delayed-ref.c | 20 +++-----------------
+ fs/btrfs/delayed-ref.c | 22 ++++------------------
fs/btrfs/qgroup.c | 30 +++---------------------------
fs/btrfs/qgroup.h | 33 +++------------------------------
- 3 files changed, 9 insertions(+), 74 deletions(-)
+ 3 files changed, 10 insertions(+), 75 deletions(-)
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
-@@ -549,7 +549,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -615,13 +615,11 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
- u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
- int action, int is_data, int is_system,
-- int *qrecord_inserted_ret,
- int *old_ref_mod, int *new_ref_mod)
+- int action, int *qrecord_inserted_ret,
+- int *old_ref_mod, int *new_ref_mod)
++ int action, int *old_ref_mod, int *new_ref_mod)
{
-@@ -557,7 +556,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_root *delayed_refs;
- int count_mod = 1;
- int must_insert_reserved = 0;
- int qrecord_inserted = 0;
- /* If reserved is provided, it must be a data extent. */
- BUG_ON(!is_data && reserved);
-@@ -618,8 +616,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
- if(btrfs_qgroup_trace_extent_nolock(fs_info,
+ delayed_refs = &trans->transaction->delayed_refs;
+
+@@ -630,8 +628,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ if (btrfs_qgroup_trace_extent_nolock(fs_info,
delayed_refs, qrecord))
kfree(qrecord);
- else
@@ -72,7 +71,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
}
trace_add_delayed_ref_head(fs_info, head_ref, action);
-@@ -645,8 +641,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -657,8 +653,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
atomic_inc(&delayed_refs->num_entries);
trans->delayed_ref_updates++;
}
@@ -80,64 +79,67 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
- *qrecord_inserted_ret = qrecord_inserted;
if (new_ref_mod)
*new_ref_mod = head_ref->total_ref_mod;
- return head_ref;
-@@ -779,7 +773,6 @@ int btrfs_add_delayed_tree_ref(struct bt
+
+@@ -732,7 +726,6 @@ int btrfs_add_delayed_tree_ref(struct bt
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
- int qrecord_inserted;
- int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
-
- BUG_ON(extent_op && extent_op->is_data);
-@@ -809,15 +802,12 @@ int btrfs_add_delayed_tree_ref(struct bt
+ bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+ int ret;
+ u8 ref_type;
+@@ -775,8 +768,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ * the spin lock
*/
head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
- bytenr, num_bytes, 0, 0, action, 0,
-- is_system, &qrecord_inserted,
+- action, &qrecord_inserted,
- old_ref_mod, new_ref_mod);
-+ is_system, old_ref_mod, new_ref_mod);
++ action, old_ref_mod, new_ref_mod);
- add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
- num_bytes, parent, ref_root, level, action);
- spin_unlock(&delayed_refs->lock);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+@@ -788,8 +780,6 @@ int btrfs_add_delayed_tree_ref(struct bt
+ if (ret > 0)
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
- if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(fs_info, record);
return 0;
free_head_ref:
-@@ -842,7 +832,6 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -814,7 +804,6 @@ int btrfs_add_delayed_data_ref(struct bt
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
- int qrecord_inserted;
+ int ret;
+ u8 ref_type;
- ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
- if (!ref)
-@@ -876,16 +865,13 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -863,8 +852,7 @@ int btrfs_add_delayed_data_ref(struct bt
+ * the spin lock
*/
head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
- bytenr, num_bytes, ref_root, reserved,
-- action, 1, 0, &qrecord_inserted,
+- action, &qrecord_inserted,
- old_ref_mod, new_ref_mod);
-+ action, 1, 0, old_ref_mod, new_ref_mod);
++ action, old_ref_mod, new_ref_mod);
- add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
- num_bytes, parent, ref_root, owner, offset,
- action);
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
+@@ -875,8 +863,6 @@ int btrfs_add_delayed_data_ref(struct bt
+ if (ret > 0)
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
- if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(fs_info, record);
return 0;
}
-@@ -913,7 +899,7 @@ int btrfs_add_delayed_extent_op(struct b
- */
- add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
- num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
-- extent_op->is_data, 0, NULL, NULL, NULL);
-+ extent_op->is_data, 0, NULL, NULL);
+@@ -902,7 +888,7 @@ int btrfs_add_delayed_extent_op(struct b
+
+ add_delayed_ref_head(fs_info, trans, head_ref, NULL,
+ BTRFS_UPDATE_DELAYED_HEAD,
+- NULL, NULL, NULL);
++ NULL, NULL);
spin_unlock(&delayed_refs->lock);
return 0;
diff --git a/series.conf b/series.conf
index 2315f18bd8..58e4c5f89a 100644
--- a/series.conf
+++ b/series.conf
@@ -8282,6 +8282,7 @@
patches.suse/0019-btrfs-make-the-delalloc-block-rsv-per-inode.patch
patches.suse/0021-btrfs-switch-args-for-comp_-_refs.patch
patches.suse/0022-btrfs-add-a-comp_refs-helper.patch
+ patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
patches.suse/btrfs-move-btrfs_truncate_block-out-of-trans-handle.patch
patches.suse/btrfs-Fix-bug-for-misused-dev_t-when-lookup-in-dev-s.patch
patches.fixes/jfs-remove-increment-of-i_version-counter.patch
@@ -16410,6 +16411,7 @@
patches.suse/msft-hv-1696-KVM-x86-fix-UD-address-of-failed-Hyper-V-hypercalls.patch
patches.fixes/sched-tracing-Fix-trace_sched_pi_setprio-for-deboost.patch
patches.arch/47-kvm-vmx-expose-ssbd-properly-to-guests.patch
+ patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch
patches.fixes/tracing-Fix-crash-when-freeing-instances-with-event-.patch
patches.suse/0001-tracing-Make-the-snapshot-trigger-work-with-instance.patch
patches.fixes/afs-Fix-directory-permissions-check.patch
@@ -16490,6 +16492,14 @@
patches.drivers/hwmon-asus_atk0110-Replace-deprecated-device-registe
patches.drivers/spi-bcm63xx-hspi-Enable-the-clock-before-calling-clk
patches.drivers/spi-pxa2xx-check-clk_prepare_enable-return-value
+ patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch
+ patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch
+ patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch
+ patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch
+ patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch
+ patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch
+ patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch
+ patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch
patches.suse/0001-btrfs-qgroup-Search-commit-root-for-rescan-to-avoid-.patch
patches.suse/0002-btrfs-qgroup-Finish-rescan-when-hit-the-last-leaf-of.patch
patches.suse/btrfs-update-stale-comments-referencing-vmtruncate.patch
@@ -18114,6 +18124,7 @@
patches.suse/sched-numa-Update-the-scan-period-without-holding-the-numa_group-lock.patch
patches.suse/sched-numa-Use-group_weights-to-identify-if-migration-degrades-locality.patch
patches.suse/sched-numa-Move-task_numa_placement-closer-to-numa_migrate_preferred.patch
+ patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch
patches.arch/perf-x86-intel-lbr-fix-incomplete-lbr-call-stack
patches.fixes/kprobes-make-list-and-blacklist-root-user-read-only.patch
patches.arch/kprobes-x86-fix-p-uses-in-error-messages
@@ -19586,6 +19597,7 @@
patches.fixes/s390-sles12sp4-pkey-move-pckmo-subfunction-available-checks-away-from-module-init.patch
patches.suse/rcu-Make-need_resched-respond-to-urgent-RCU-QS-needs.patch
patches.fixes/kprobes-Return-error-if-we-fail-to-reuse-kprobe-inst.patch
+ patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch
patches.suse/sched-numa-remove-unused-code-from-update_numa_stats.patch
patches.suse/sched-numa-remove-unused-nr_running-field.patch
patches.arch/x86-corruption-check-fix-panic-in-memory_corruption_check-when-boot-option-without-value-is-provided
@@ -20427,6 +20439,7 @@
patches.drivers/i2c-scmi-Fix-probe-error-on-devices-with-an-empty-SM.patch
patches.fixes/nvme-validate-controller-state-before-rescheduling-k.patch
patches.drivers/Revert-PCI-ASPM-Do-not-initialize-link-state-when-as.patch
+ patches.fixes/vhost-vsock-fix-reset-orphans-race-with-close-timeou.patch
patches.fixes/virtio-s390-avoid-race-on-vcdev-config
patches.fixes/virtio-s390-fix-race-in-ccw_io_helper
patches.drivers/vhost-vsock-fix-use-after-free-in-network-stack-call.patch
@@ -20636,6 +20649,7 @@
patches.arch/kvm-nvmx-set-vm-instruction-error-for-vmptrld-of-unbacked-page
patches.arch/kvm-nvmx-free-the-vmread-vmwrite-bitmaps-if-alloc_kvm_area-fails
patches.arch/kvm-vmx-set-ia32_tsc_aux-for-legacy-mode-guests
+ patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch
patches.fixes/arm-arm64-KVM-vgic-Force-VM-halt-when-changing-the-a.patch
patches.arch/KVM-PPC-Book3S-HV-Fix-race-between-kvm_unmap_hva_ran.patch
patches.fixes/KVM-PPC-Book3S-PR-Set-hflag-to-indicate-that-POWER9-.patch
@@ -21203,6 +21217,8 @@
patches.fixes/dccp-fool-proof-ccid_hc_-rt-x_parse_options.patch
patches.fixes/bpf-fix-lockdep-false-positive-in-percpu_freelist.patch
patches.fixes/bpf-Fix-syscall-s-stackmap-lookup-potential-deadlock.patch
+ patches.fixes/vsock-virtio-fix-kernel-panic-after-device-hot-unplu.patch
+ patches.fixes/vsock-virtio-reset-connected-sockets-on-device-remov.patch
patches.suse/net-systemport-Fix-WoL-with-password-after-deep-slee.patch
patches.suse/net-dp83640-expire-old-TX-skb.patch
patches.fixes/net-smc-preallocated-memory-for-rdma-work-requests
@@ -21663,6 +21679,7 @@
patches.suse/net-hsr-fix-possible-crash-in-add_timer.patch
patches.suse/route-set-the-deleted-fnhe-fnhe_daddr-to-0-in-ip_del.patch
patches.fixes/0001-vxlan-Fix-GRO-cells-race-condition-between-receive-a.patch
+ patches.fixes/vsock-virtio-fix-kernel-panic-from-virtio_transport_.patch
patches.fixes/0001-tcp-handle-inet_csk_reqsk_queue_add-failures.patch
patches.fixes/bpf-fix-replace_map_fd_with_map_ptr-s-ldimm64-second.patch
patches.suse/rxrpc-Fix-client-call-queueing-waiting-for-channel.patch
@@ -21903,6 +21920,7 @@
patches.drivers/fm10k-Fix-a-potential-NULL-pointer-dereference.patch
patches.drivers/qmi_wwan-add-Olicard-600.patch
patches.fixes/openvswitch-fix-flow-actions-reallocation.patch
+ patches.fixes/net-rds-force-to-destroy-connection-if-t_sock-is-NUL.patch
patches.fixes/bpf-fix-use-after-free-in-bpf_evict_inode.patch
patches.suse/kcm-switch-order-of-device-registration-to-fix-a-cra.patch
patches.fixes/0001-ipv6-Fix-dangling-pointer-when-ipv6-fragment.patch
@@ -21910,6 +21928,7 @@
patches.drivers/ibmvnic-Fix-completion-structure-initialization.patch
patches.drm/drm-i915-gvt-do-not-deliver-a-workload-if-its-creati.patch
patches.drm/0002-drm-i915-gvt-do-not-let-pin-count-of-shadow-mm-go-ne.patch
+ patches.fixes/vfio-pci-use-correct-format-characters.patch
patches.drivers/vfio-type1-limit-dma-mappings-per-container
patches.fixes/dm-disable-DISCARD-if-the-underlying-storage-no-long.patch
patches.fixes/mm-huge_memory.c-fix-modifying-of-page-protection-by-insert_pfn_pmd.patch
@@ -21972,6 +21991,8 @@
patches.drivers/staging-comedi-vmk80xx-Fix-possible-double-free-of-u.patch
patches.drivers/staging-comedi-ni_usb6501-Fix-use-of-uninitialized-m.patch
patches.drivers/staging-comedi-ni_usb6501-Fix-possible-double-free-o.patch
+ patches.drivers/sc16is7xx-move-label-err_spi-to-correct-section.patch
+ patches.drivers/sc16is7xx-put-err_spi-and-err_i2c-into-correct-ifdef.patch
patches.fixes/device_cgroup-fix-RCU-imbalance-in-error-case.patch
patches.arch/x86-speculation-prevent-deadlock-on-ssb_state-lock.patch
patches.drivers/ALSA-hda-Initialize-power_state-field-properly.patch
@@ -22034,6 +22055,7 @@
patches.suse/0001-btrfs-delayed-ref-Introduce-better-documented-delaye.patch
patches.suse/0002-btrfs-extent-tree-Open-code-process_func-in-__btrfs_.patch
patches.drivers/mmc-core-fix-possible-use-after-free-of-host.patch
+ patches.drivers/phy-sun4i-usb-Make-sure-to-disable-PHY0-passby-for-p.patch
patches.drivers/stm-class-Fix-channel-free-in-stm-output-free-path.patch
patches.drivers/intel_th-pci-Add-Comet-Lake-support.patch
patches.drivers/intel_th-msu-Fix-single-mode-with-IOMMU.patch
@@ -22057,6 +22079,7 @@
patches.drivers/mwl8k-Fix-rate_idx-underflow.patch
patches.drivers/rtlwifi-rtl8723ae-Fix-missing-break-in-switch-statem.patch
patches.drivers/brcm80211-potential-NULL-dereference-in-brcmf_cfg802.patch
+ patches.fixes/0001-dt-bindings-net-Fix-a-typo-in-the-phy-mode-list-for-.patch
patches.drivers/usb-storage-Set-virt_boundary_mask-to-avoid-SG-overf.patch
patches.drivers/USB-cdc-acm-fix-unthrottle-races.patch
patches.fixes/0001-UAS-fix-alignment-of-scatter-gather-segments.patch
@@ -22082,6 +22105,8 @@
patches.fixes/scripts-override-locale-from-environment-when-runnin.patch
patches.drm/drm-i915-Fix-I915_EXEC_RING_MASK.patch
patches.drm/drm-fb-helper-dpms_legacy-Only-set-on-connectors-in-.patch
+ patches.drm/0004-drm-i915-gvt-Fix-incorrect-mask-of-mmio-0x22028-in-g.patch
+ patches.drm/0005-drm-meson-add-size-and-alignment-requirements-for-du.patch
patches.drm/drm-rockchip-shutdown-drm-subsystem-on-shutdown.patch
patches.drivers/ALSA-timer-Unify-timer-callback-process-code.patch
patches.drivers/ALSA-timer-Make-sure-to-clear-pending-ack-list.patch
@@ -22129,9 +22154,16 @@
patches.drivers/platform-x86-intel_punit_ipc-Revert-Fix-resource-ior.patch
patches.drivers/platform-x86-alienware-wmi-printing-the-wrong-error-.patch
patches.drivers/platform-x86-sony-laptop-Fix-unintentional-fall-thro.patch
+ patches.fixes/vfio-mdev-Avoid-release-parent-reference-during-erro.patch
+ patches.fixes/vfio-mdev-Fix-aborting-mdev-child-device-removal-if-.patch
patches.drivers/mtd-nand-omap-Fix-comment-in-platform-data-using-wro.patch
patches.fixes/0001-mtd-spi-nor-intel-spi-Avoid-crossing-4K-address-boun.patch
patches.drivers/mtd-part-fix-incorrect-format-specifier-for-an-unsig.patch
+ patches.drivers/iommu-vt-d-don-t-request-page-request-irq-under-dmar_global_lock
+ patches.drivers/iommu-vt-d-set-intel_iommu_gfx_mapped-correctly
+ patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu
+ patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch
+ patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch
# powerpc/linux next
patches.arch/powerpc-numa-improve-control-of-topology-updates.patch
@@ -22157,6 +22189,8 @@
patches.fixes/ch-fixup-refcounting-imbalance-for-SCSI-devices.patch
patches.suse/scsi-libsas-allocate-sense-buffer-for-bsg-queue.patch
patches.fixes/qla2xxx-always-allocate-qla_tgt_wq.patch
+ patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch
+ patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch
########################################################
# end of sorted patches
@@ -22165,6 +22199,25 @@
# git://git.infradead.org/nvme.git nvme-5.2
patches.fixes/nvme-multipath-split-bios-with-the-ns_head-bio_set-b.patch
+ # MDS
+ patches.arch/x86-msr-index-cleanup-bit-defines.patch
+ patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
+ patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
+ patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
+ patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
+ patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
+ patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
+ patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
+ patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
+ patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
+ patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
+ patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
+ patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
+ patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
+ patches.arch/x86-speculation-mds-add-smt-warning-message.patch
+ patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
+ patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
+
########################################################
#
# packaging-specific patches (tweaks for autobuild,