author    Kernel Build Daemon <kbuild@suse.de>  2018-06-17 07:04:28 +0200
committer Kernel Build Daemon <kbuild@suse.de>  2018-06-17 07:04:28 +0200
commit    5a895c7954d0015560dfb7f5e36b1870a4e740d4 (patch)
tree      bd7595134510e4d008342248636f7c8fe6d540fd
parent    4ece9f935712a7f668b74336ed0366ad442861b0 (diff)
parent    06238a187fb7aafb7088fc617cb0a0efd6259d80 (diff)
Merge branch 'SLE12-SP3' into openSUSE-42.3
-rw-r--r--  patches.arch/0003-x86-fpu-x86-mm-pkeys-add-pkru-xsave-fields-and-data-structures | 2
-rw-r--r--  patches.arch/0052-x86-pkeys-make-protection-keys-an-eager-feature | 4
-rw-r--r--  patches.kabi/kabi-protect-struct-x86_emulate_ops.patch | 46
-rw-r--r--  patches.kernel.org/4.4.138-001-x86-fpu-Fix-early-FPU-command-line-parsing.patch (renamed from patches.suse/0001-x86-fpu-Fix-early-FPU-command-line-parsing.patch) | 27
-rw-r--r--  patches.kernel.org/4.4.138-002-x86-Remove-unused-function-cpu_has_ht_sibling.patch | 43
-rw-r--r--  patches.kernel.org/4.4.138-003-x86-cpufeature-Remove-unused-and-seldomly-use.patch | 530
-rw-r--r--  patches.kernel.org/4.4.138-004-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch (renamed from patches.suse/0003-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch) | 39
-rw-r--r--  patches.kernel.org/4.4.138-005-x86-fpu-Disable-AVX-when-eagerfpu-is-off.patch | 107
-rw-r--r--  patches.kernel.org/4.4.138-006-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch (renamed from patches.suse/0005-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch) | 22
-rw-r--r--  patches.kernel.org/4.4.138-007-x86-fpu-Fix-no387-regression.patch (renamed from patches.suse/0001-x86-fpu-Fix-no387-regression.patch) | 18
-rw-r--r--  patches.kernel.org/4.4.138-008-x86-fpu-Revert-x86-fpu-Disable-AVX-when-eager.patch | 100
-rw-r--r--  patches.kernel.org/4.4.138-009-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-.patch (renamed from patches.suse/0001-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-machine.patch) | 24
-rw-r--r--  patches.kernel.org/4.4.138-010-x86-fpu-Hard-disable-lazy-FPU-mode.patch | 206
-rw-r--r--  patches.kernel.org/4.4.138-011-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch (renamed from patches.suse/0002-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch) | 18
-rw-r--r--  patches.kernel.org/4.4.138-012-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch (renamed from patches.suse/0001-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch) | 28
-rw-r--r--  patches.kernel.org/4.4.138-013-af_key-Always-verify-length-of-provided-sadb_.patch | 113
-rw-r--r--  patches.kernel.org/4.4.138-014-x86-crypto-x86-fpu-Remove-X86_FEATURE_EAGER_F.patch | 62
-rw-r--r--  patches.kernel.org/4.4.138-015-gpio-No-NULL-owner.patch | 51
-rw-r--r--  patches.kernel.org/4.4.138-016-Clarify-and-fix-MAX_LFS_FILESIZE-macros.patch | 89
-rw-r--r--  patches.kernel.org/4.4.138-017-KVM-x86-introduce-linear_-read-write-_system.patch | 190
-rw-r--r--  patches.kernel.org/4.4.138-018-KVM-x86-pass-kvm_vcpu-to-kvm_read_guest_virt-.patch | 203
-rw-r--r--  patches.kernel.org/4.4.138-019-serial-samsung-fix-maxburst-parameter-for-DMA.patch | 48
-rw-r--r--  patches.kernel.org/4.4.138-020-vmw_balloon-fixing-double-free-when-batching-.patch | 119
-rw-r--r--  patches.kernel.org/4.4.138-021-kvm-x86-use-correct-privilege-level-for-sgdt-.patch | 159
-rw-r--r--  patches.kernel.org/4.4.138-022-Input-goodix-add-new-ACPI-id-for-GPD-Win-2-to.patch | 37
-rw-r--r--  patches.kernel.org/4.4.138-023-Input-elan_i2c-add-ELAN0612-Lenovo-v330-14IKB.patch | 37
-rw-r--r--  patches.kernel.org/4.4.138-024-crypto-vmx-Remove-overly-verbose-printk-from-.patch | 90
-rw-r--r--  patches.kernel.org/4.4.138-025-Linux-4.4.138.patch | 27
-rw-r--r--  patches.suse/revert-x86-fpu-Hard-disable-lazy-FPU-mode.patch | 169
-rw-r--r--  series.conf | 34
30 files changed, 2578 insertions(+), 64 deletions(-)
diff --git a/patches.arch/0003-x86-fpu-x86-mm-pkeys-add-pkru-xsave-fields-and-data-structures b/patches.arch/0003-x86-fpu-x86-mm-pkeys-add-pkru-xsave-fields-and-data-structures
index bb606a5441..a881e1c986 100644
--- a/patches.arch/0003-x86-fpu-x86-mm-pkeys-add-pkru-xsave-fields-and-data-structures
+++ b/patches.arch/0003-x86-fpu-x86-mm-pkeys-add-pkru-xsave-fields-and-data-structures
@@ -74,7 +74,7 @@ Acked-by: Joerg Roedel <jroedel@suse.de>
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -24,7 +24,8 @@
XFEATURE_MASK_YMM | \
- XFEATURE_MASK_OPMASK | \
+ XFEATURE_MASK_OPMASK | \
XFEATURE_MASK_ZMM_Hi256 | \
- XFEATURE_MASK_Hi16_ZMM)
+ XFEATURE_MASK_Hi16_ZMM | \
diff --git a/patches.arch/0052-x86-pkeys-make-protection-keys-an-eager-feature b/patches.arch/0052-x86-pkeys-make-protection-keys-an-eager-feature
index 6b6db0e107..46c115fdc7 100644
--- a/patches.arch/0052-x86-pkeys-make-protection-keys-an-eager-feature
+++ b/patches.arch/0052-x86-pkeys-make-protection-keys-an-eager-feature
@@ -30,14 +30,14 @@ Link: http://lkml.kernel.org/r/20161007162342.28A49813@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Joerg Roedel <jroedel@suse.de>
---
- arch/x86/include/asm/fpu/xstate.h | 7 ++++---
+ arch/x86/include/asm/fpu/xstate.h | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -24,11 +24,12 @@
XFEATURE_MASK_YMM | \
- XFEATURE_MASK_OPMASK | \
+ XFEATURE_MASK_OPMASK | \
XFEATURE_MASK_ZMM_Hi256 | \
- XFEATURE_MASK_Hi16_ZMM | \
- XFEATURE_MASK_PKRU)
diff --git a/patches.kabi/kabi-protect-struct-x86_emulate_ops.patch b/patches.kabi/kabi-protect-struct-x86_emulate_ops.patch
new file mode 100644
index 0000000000..1bbe8672dd
--- /dev/null
+++ b/patches.kabi/kabi-protect-struct-x86_emulate_ops.patch
@@ -0,0 +1,46 @@
+From: Jiri Slaby <jslaby@suse.cz>
+Subject: kABI: protect struct x86_emulate_ops
+Patch-mainline: never, kabi
+References: kabi
+
+In 4.4.138, commit 25bdf0807b52fc99c3e7a6b424a9e847510dab15 (kvm: x86:
+use correct privilege level for sgdt/sidt/fxsave/fxrstor access),
+upstream commit 3c9fa24ca7c9c47605672916491f79e8ccacb9e6 added one
+parameter to two hooks in struct x86_emulate_ops. This made the kABI
+checker complain.
+
+Given the structure is referenced from struct x86_emulate_ctxt via
+pointer, is internal only to x86, and the hooks are defined only there,
+it should cause no real harm, so hide the change from the kABI checker.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/kvm_emulate.h | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/x86/include/asm/kvm_emulate.h
++++ b/arch/x86/include/asm/kvm_emulate.h
+@@ -110,7 +110,11 @@ struct x86_emulate_ops {
+ int (*read_std)(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr, void *val,
+ unsigned int bytes,
++#ifdef __GENKSYMS__
++ struct x86_exception *fault);
++#else
+ struct x86_exception *fault, bool system);
++#endif
+
+ /*
+ * read_phys: Read bytes of standard (non-emulated/special) memory.
+@@ -132,7 +136,11 @@ struct x86_emulate_ops {
+ */
+ int (*write_std)(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr, void *val, unsigned int bytes,
++#ifdef __GENKSYMS__
++ struct x86_exception *fault);
++#else
+ struct x86_exception *fault, bool system);
++#endif
+ /*
+ * fetch: Read bytes of standard (non-emulated/special) memory.
+ * Used for instruction fetch.
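For context on the trick in the kABI patch above: genksyms, which computes the symbol-version CRCs the kABI checker compares, preprocesses the sources with __GENKSYMS__ defined, while the real compile does not. Guarding the new prototype this way keeps the old CRC stable while the compiler sees the extended signature. A minimal standalone sketch of the pattern (hypothetical struct and hook names, not the actual kernel declarations):

    /* Hypothetical ops table whose hook grew an extra parameter. */
    struct example_ops {
            int (*read)(void *ctxt, unsigned long addr, void *val,
                        unsigned int bytes,
    #ifdef __GENKSYMS__
                        /* genksyms sees the old signature -> unchanged CRC */
                        struct x86_exception *fault);
    #else
                        /* the compiler sees the real, extended signature */
                        struct x86_exception *fault, bool system);
    #endif
    };

As the patch log notes, this is harmless here only because the structure is reached via a pointer and its hooks are defined entirely inside arch/x86.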
diff --git a/patches.suse/0001-x86-fpu-Fix-early-FPU-command-line-parsing.patch b/patches.kernel.org/4.4.138-001-x86-fpu-Fix-early-FPU-command-line-parsing.patch
index 3b6768f1ef..bba928a231 100644
--- a/patches.suse/0001-x86-fpu-Fix-early-FPU-command-line-parsing.patch
+++ b/patches.kernel.org/4.4.138-001-x86-fpu-Fix-early-FPU-command-line-parsing.patch
@@ -1,9 +1,11 @@
From: yu-cheng yu <yu-cheng.yu@intel.com>
Date: Wed, 6 Jan 2016 14:24:51 -0800
-Subject: x86/fpu: Fix early FPU command-line parsing
+Subject: [PATCH] x86/fpu: Fix early FPU command-line parsing
+Patch-mainline: 4.4.138
+References: CVE-2018-3665 bnc#1012382 bnc#1087086
Git-commit: 4f81cbafcce2c603db7865e9d0e461f7947d77d4
-Patch-mainline: v4.5-rc1
-References: bnc#1087086 CVE-2018-3665
+
+commit 4f81cbafcce2c603db7865e9d0e461f7947d77d4 upstream.
The function fpu__init_system() is executed before
parse_early_param(). This causes wrong FPU configuration. This
@@ -37,11 +39,14 @@ Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: yu-cheng yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/1452119094-7252-2-git-send-email-yu-cheng.yu@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
---
- arch/x86/kernel/fpu/init.c | 109 +++++++++++++++------------------------------
+ arch/x86/kernel/fpu/init.c | 109 +++++++++++++------------------------
1 file changed, 38 insertions(+), 71 deletions(-)
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 1011c05b1bd5..06ea88b4ca44 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -3,8 +3,11 @@
@@ -56,7 +61,7 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
/*
* Initialize the TS bit in CR0 according to the style of context-switches
-@@ -271,18 +274,6 @@ static void __init fpu__init_system_xsta
+@@ -262,18 +265,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
*/
static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
@@ -75,10 +80,11 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
/*
* Pick the FPU context switching strategy:
*/
-@@ -317,11 +308,46 @@ static void __init fpu__init_system_ctx_
+@@ -307,12 +298,47 @@ static void __init fpu__init_system_ctx_switch(void)
+ printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
}
- /*
++/*
+ * We parse fpu parameters early because fpu__init_system() is executed
+ * before parse_early_param().
+ */
@@ -112,7 +118,7 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+ setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+}
+
-+/*
+ /*
* Called on the boot CPU once per system bootup, to set up the initial
* FPU state that is later cloned into all processes:
*/
@@ -122,7 +128,7 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
fpu__init_system_early_generic(c);
/*
-@@ -345,62 +371,3 @@ void __init fpu__init_system(struct cpui
+@@ -336,62 +362,3 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
fpu__init_system_ctx_switch();
}
@@ -185,3 +191,6 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
- return 1;
-}
-__setup("nofxsr", x86_nofxsr_setup);
+--
+2.17.1
+
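Stripped of the diff context, the helper this patch introduces is small: because fpu__init_system() runs before parse_early_param(), FPU options must be pulled out of boot_command_line by hand with cmdline_find_option_bool(). A condensed sketch showing two of the handled options (the full version, per the hunks above, also handles "eagerfpu=" and "nofxsr"):

    /*
     * Scan boot_command_line directly: this runs before
     * parse_early_param(), so early_param() hooks are not available yet.
     */
    static void __init fpu__init_parse_early_param(void)
    {
            if (cmdline_find_option_bool(boot_command_line, "no387"))
                    setup_clear_cpu_cap(X86_FEATURE_FPU);

            if (cmdline_find_option_bool(boot_command_line, "noxsave")) {
                    setup_clear_cpu_cap(X86_FEATURE_XSAVE);
                    setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
                    setup_clear_cpu_cap(X86_FEATURE_XSAVES);
            }
    }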
diff --git a/patches.kernel.org/4.4.138-002-x86-Remove-unused-function-cpu_has_ht_sibling.patch b/patches.kernel.org/4.4.138-002-x86-Remove-unused-function-cpu_has_ht_sibling.patch
new file mode 100644
index 0000000000..9553c019b1
--- /dev/null
+++ b/patches.kernel.org/4.4.138-002-x86-Remove-unused-function-cpu_has_ht_sibling.patch
@@ -0,0 +1,43 @@
+From: Juergen Gross <jgross@suse.com>
+Date: Tue, 17 Nov 2015 13:05:43 +0100
+Subject: [PATCH] x86: Remove unused function cpu_has_ht_siblings()
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: ed29210cd6a67425026e78aa298fa434e11a74e3
+
+commit ed29210cd6a67425026e78aa298fa434e11a74e3 upstream.
+
+It is used nowhere.
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Link: http://lkml.kernel.org/r/1447761943-770-1-git-send-email-jgross@suse.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/smp.h | 9 ---------
+ 1 file changed, 9 deletions(-)
+
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index 222a6a3ca2b5..a438c5598a90 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -21,15 +21,6 @@
+ extern int smp_num_siblings;
+ extern unsigned int num_processors;
+
+-static inline bool cpu_has_ht_siblings(void)
+-{
+- bool has_siblings = false;
+-#ifdef CONFIG_SMP
+- has_siblings = cpu_has_ht && smp_num_siblings > 1;
+-#endif
+- return has_siblings;
+-}
+-
+ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
+ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
+ /* cpus sharing the last level cache: */
+--
+2.17.1
+
diff --git a/patches.kernel.org/4.4.138-003-x86-cpufeature-Remove-unused-and-seldomly-use.patch b/patches.kernel.org/4.4.138-003-x86-cpufeature-Remove-unused-and-seldomly-use.patch
new file mode 100644
index 0000000000..134af1c401
--- /dev/null
+++ b/patches.kernel.org/4.4.138-003-x86-cpufeature-Remove-unused-and-seldomly-use.patch
@@ -0,0 +1,530 @@
+From: Borislav Petkov <bp@suse.de>
+Date: Mon, 7 Dec 2015 10:39:41 +0100
+Subject: [PATCH] x86/cpufeature: Remove unused and seldomly used cpu_has_xx
+ macros
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 362f924b64ba0f4be2ee0cb697690c33d40be721
+
+commit 362f924b64ba0f4be2ee0cb697690c33d40be721 upstream.
+
+Those are stupid and code should use static_cpu_has_safe() or
+boot_cpu_has() instead. Kill the least used and unused ones.
+
+The remaining ones need more careful inspection before a conversion can
+happen. On the TODO.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: http://lkml.kernel.org/r/1449481182-27541-4-git-send-email-bp@alien8.de
+Cc: David Sterba <dsterba@suse.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Matt Mackall <mpm@selenic.com>
+Cc: Chris Mason <clm@fb.com>
+Cc: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/crypto/chacha20_glue.c | 2 +-
+ arch/x86/crypto/crc32c-intel_glue.c | 2 +-
+ arch/x86/include/asm/cmpxchg_32.h | 2 +-
+ arch/x86/include/asm/cmpxchg_64.h | 2 +-
+ arch/x86/include/asm/cpufeature.h | 37 +++------------------
+ arch/x86/include/asm/xor_32.h | 2 +-
+ arch/x86/kernel/cpu/amd.c | 4 +--
+ arch/x86/kernel/cpu/common.c | 4 ++-
+ arch/x86/kernel/cpu/intel.c | 3 +-
+ arch/x86/kernel/cpu/intel_cacheinfo.c | 6 ++--
+ arch/x86/kernel/cpu/mtrr/generic.c | 2 +-
+ arch/x86/kernel/cpu/mtrr/main.c | 2 +-
+ arch/x86/kernel/cpu/perf_event_amd.c | 4 +--
+ arch/x86/kernel/cpu/perf_event_amd_uncore.c | 11 +++---
+ arch/x86/kernel/fpu/init.c | 4 +--
+ arch/x86/kernel/hw_breakpoint.c | 6 ++--
+ arch/x86/kernel/smpboot.c | 2 +-
+ arch/x86/kernel/vm86_32.c | 4 ++-
+ arch/x86/mm/setup_nx.c | 4 +--
+ drivers/char/hw_random/via-rng.c | 5 +--
+ drivers/crypto/padlock-aes.c | 2 +-
+ drivers/crypto/padlock-sha.c | 2 +-
+ drivers/iommu/intel_irq_remapping.c | 2 +-
+ fs/btrfs/disk-io.c | 2 +-
+ 24 files changed, 48 insertions(+), 68 deletions(-)
+
+diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
+index 722bacea040e..8baaff5af0b5 100644
+--- a/arch/x86/crypto/chacha20_glue.c
++++ b/arch/x86/crypto/chacha20_glue.c
+@@ -125,7 +125,7 @@ static struct crypto_alg alg = {
+
+ static int __init chacha20_simd_mod_init(void)
+ {
+- if (!cpu_has_ssse3)
++ if (!boot_cpu_has(X86_FEATURE_SSSE3))
+ return -ENODEV;
+
+ #ifdef CONFIG_AS_AVX2
+diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
+index 81a595d75cf5..0e9871693f24 100644
+--- a/arch/x86/crypto/crc32c-intel_glue.c
++++ b/arch/x86/crypto/crc32c-intel_glue.c
+@@ -257,7 +257,7 @@ static int __init crc32c_intel_mod_init(void)
+ if (!x86_match_cpu(crc32c_cpu_id))
+ return -ENODEV;
+ #ifdef CONFIG_X86_64
+- if (cpu_has_pclmulqdq) {
++ if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
+ alg.update = crc32c_pcl_intel_update;
+ alg.finup = crc32c_pcl_intel_finup;
+ alg.digest = crc32c_pcl_intel_digest;
+diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
+index f7e142926481..e4959d023af8 100644
+--- a/arch/x86/include/asm/cmpxchg_32.h
++++ b/arch/x86/include/asm/cmpxchg_32.h
+@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+
+ #endif
+
+-#define system_has_cmpxchg_double() cpu_has_cx8
++#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
+
+ #endif /* _ASM_X86_CMPXCHG_32_H */
+diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
+index 1af94697aae5..caa23a34c963 100644
+--- a/arch/x86/include/asm/cmpxchg_64.h
++++ b/arch/x86/include/asm/cmpxchg_64.h
+@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+
+-#define system_has_cmpxchg_double() cpu_has_cx16
++#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
+
+ #endif /* _ASM_X86_CMPXCHG_64_H */
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 641f0f2c2982..dd7578e0fe0e 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -368,58 +368,29 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+ #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+
+ #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
+-#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
+ #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
+ #define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
+ #define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
+ #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
+-#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
+-#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
+-#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
+ #define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
+ #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
+ #define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
+-#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
+-#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
+ #define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
+ #define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
+ #define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
+-#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
+-#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
+-#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
+-#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
+-#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
+-#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
+-#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
+-#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
+-#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
+-#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
+-#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
+-#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
+-#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
+-#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
+ #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
+-#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
+ #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
+ #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
+ #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
+-#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
+-#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
+ #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
+ #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
+-#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
+ #define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES)
+ #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
+ #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
+-#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
+-#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+-#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
+-#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2)
+-#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
+-#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
+-#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
+-#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
+-#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
++/*
++ * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
++ * fast paths and boot_cpu_has() otherwise!
++ */
+
+ #if __GNUC__ >= 4
+ extern void warn_pre_alternatives(void);
+diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
+index 5a08bc8bff33..c54beb44c4c1 100644
+--- a/arch/x86/include/asm/xor_32.h
++++ b/arch/x86/include/asm/xor_32.h
+@@ -553,7 +553,7 @@ do { \
+ if (cpu_has_xmm) { \
+ xor_speed(&xor_block_pIII_sse); \
+ xor_speed(&xor_block_sse_pf64); \
+- } else if (cpu_has_mmx) { \
++ } else if (boot_cpu_has(X86_FEATURE_MMX)) { \
+ xor_speed(&xor_block_pII_mmx); \
+ xor_speed(&xor_block_p5_mmx); \
+ } else { \
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 4bf9e77f3e05..f4fb8f5b0be4 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -304,7 +304,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
+ int cpu = smp_processor_id();
+
+ /* get information required for multi-node processors */
+- if (cpu_has_topoext) {
++ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+@@ -954,7 +954,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+
+ void set_dr_addr_mask(unsigned long mask, int dr)
+ {
+- if (!cpu_has_bpext)
++ if (!boot_cpu_has(X86_FEATURE_BPEXT))
+ return;
+
+ switch (dr) {
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 8eabbafff213..0498ad3702f5 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1539,7 +1539,9 @@ void cpu_init(void)
+
+ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+- if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
++ if (cpu_feature_enabled(X86_FEATURE_VME) ||
++ cpu_has_tsc ||
++ boot_cpu_has(X86_FEATURE_DE))
+ cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+
+ load_current_idt();
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 209ac1e7d1f0..565648bc1a0a 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -445,7 +445,8 @@ static void init_intel(struct cpuinfo_x86 *c)
+
+ if (cpu_has_xmm2)
+ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+- if (cpu_has_ds) {
++
++ if (boot_cpu_has(X86_FEATURE_DS)) {
+ unsigned int l1;
+ rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+ if (!(l1 & (1<<11)))
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index b4ca91cf55b0..3fa72317ad78 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -591,7 +591,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
+ unsigned edx;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+- if (cpu_has_topoext)
++ if (boot_cpu_has(X86_FEATURE_TOPOEXT))
+ cpuid_count(0x8000001d, index, &eax.full,
+ &ebx.full, &ecx.full, &edx);
+ else
+@@ -637,7 +637,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
+ void init_amd_cacheinfo(struct cpuinfo_x86 *c)
+ {
+
+- if (cpu_has_topoext) {
++ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ num_cache_leaves = find_num_cache_leaves(c);
+ } else if (c->extended_cpuid_level >= 0x80000006) {
+ if (cpuid_edx(0x80000006) & 0xf000)
+@@ -809,7 +809,7 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
+ struct cacheinfo *this_leaf;
+ int i, sibling;
+
+- if (cpu_has_topoext) {
++ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ unsigned int apicid, nshared, first, last;
+
+ this_leaf = this_cpu_ci->info_list + index;
+diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
+index b5624fafa44a..136ae86f4f5f 100644
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -349,7 +349,7 @@ static void get_fixed_ranges(mtrr_type *frs)
+
+ void mtrr_save_fixed_ranges(void *info)
+ {
+- if (cpu_has_mtrr)
++ if (boot_cpu_has(X86_FEATURE_MTRR))
+ get_fixed_ranges(mtrr_state.fixed_ranges);
+ }
+
+diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
+index fa77ac8291f0..f924f41af89a 100644
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -682,7 +682,7 @@ void __init mtrr_bp_init(void)
+
+ phys_addr = 32;
+
+- if (cpu_has_mtrr) {
++ if (boot_cpu_has(X86_FEATURE_MTRR)) {
+ mtrr_if = &generic_mtrr_ops;
+ size_or_mask = SIZE_OR_MASK_BITS(36);
+ size_and_mask = 0x00f00000;
+diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
+index 1cee5d2d7ece..3ea177cb7366 100644
+--- a/arch/x86/kernel/cpu/perf_event_amd.c
++++ b/arch/x86/kernel/cpu/perf_event_amd.c
+@@ -160,7 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
+ if (offset)
+ return offset;
+
+- if (!cpu_has_perfctr_core)
++ if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+ offset = index;
+ else
+ offset = index << 1;
+@@ -652,7 +652,7 @@ static __initconst const struct x86_pmu amd_pmu = {
+
+ static int __init amd_core_pmu_init(void)
+ {
+- if (!cpu_has_perfctr_core)
++ if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+ return 0;
+
+ switch (boot_cpu_data.x86) {
+diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+index cc6cedb8f25d..49742746a6c9 100644
+--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
++++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+@@ -523,10 +523,10 @@ static int __init amd_uncore_init(void)
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ goto fail_nodev;
+
+- if (!cpu_has_topoext)
++ if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
+ goto fail_nodev;
+
+- if (cpu_has_perfctr_nb) {
++ if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
+ amd_uncore_nb = alloc_percpu(struct amd_uncore *);
+ if (!amd_uncore_nb) {
+ ret = -ENOMEM;
+@@ -540,7 +540,7 @@ static int __init amd_uncore_init(void)
+ ret = 0;
+ }
+
+- if (cpu_has_perfctr_l2) {
++ if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
+ amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
+ if (!amd_uncore_l2) {
+ ret = -ENOMEM;
+@@ -583,10 +583,11 @@ static int __init amd_uncore_init(void)
+
+ /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
+ amd_uncore_nb = amd_uncore_l2 = NULL;
+- if (cpu_has_perfctr_l2)
++
++ if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
+ perf_pmu_unregister(&amd_l2_pmu);
+ fail_l2:
+- if (cpu_has_perfctr_nb)
++ if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
+ perf_pmu_unregister(&amd_nb_pmu);
+ if (amd_uncore_l2)
+ free_percpu(amd_uncore_l2);
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 06ea88b4ca44..72577ad8c1fb 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -15,7 +15,7 @@
+ */
+ static void fpu__init_cpu_ctx_switch(void)
+ {
+- if (!cpu_has_eager_fpu)
++ if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
+ stts();
+ else
+ clts();
+@@ -279,7 +279,7 @@ static void __init fpu__init_system_ctx_switch(void)
+ current_thread_info()->status = 0;
+
+ /* Auto enable eagerfpu for xsaveopt */
+- if (cpu_has_xsaveopt && eagerfpu != DISABLE)
++ if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
+ eagerfpu = ENABLE;
+
+ if (xfeatures_mask & XFEATURE_MASK_EAGER) {
+diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
+index 50a3fad5b89f..2bcfb5f2bc44 100644
+--- a/arch/x86/kernel/hw_breakpoint.c
++++ b/arch/x86/kernel/hw_breakpoint.c
+@@ -300,6 +300,10 @@ static int arch_build_bp_info(struct perf_event *bp)
+ return -EINVAL;
+ if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
+ return -EINVAL;
++
++ if (!boot_cpu_has(X86_FEATURE_BPEXT))
++ return -EOPNOTSUPP;
++
+ /*
+ * It's impossible to use a range breakpoint to fake out
+ * user vs kernel detection because bp_len - 1 can't
+@@ -307,8 +311,6 @@ static int arch_build_bp_info(struct perf_event *bp)
+ * breakpoints, then we'll have to check for kprobe-blacklisted
+ * addresses anywhere in the range.
+ */
+- if (!cpu_has_bpext)
+- return -EOPNOTSUPP;
+ info->mask = bp->attr.bp_len - 1;
+ info->len = X86_BREAKPOINT_LEN_1;
+ }
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 48ca93242bfd..1f7aefc7b0b4 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -295,7 +295,7 @@ do { \
+
+ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+ {
+- if (cpu_has_topoext) {
++ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
+
+ if (c->phys_proc_id == o->phys_proc_id &&
+diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
+index af57736a0309..d6d64a519559 100644
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -357,8 +357,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
+ tss = &per_cpu(cpu_tss, get_cpu());
+ /* make room for real-mode segments */
+ tsk->thread.sp0 += 16;
+- if (cpu_has_sep)
++
++ if (static_cpu_has_safe(X86_FEATURE_SEP))
+ tsk->thread.sysenter_cs = 0;
++
+ load_sp0(tss, &tsk->thread);
+ put_cpu();
+
+diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
+index 90555bf60aa4..92e2eacb3321 100644
+--- a/arch/x86/mm/setup_nx.c
++++ b/arch/x86/mm/setup_nx.c
+@@ -31,7 +31,7 @@ early_param("noexec", noexec_setup);
+
+ void x86_configure_nx(void)
+ {
+- if (cpu_has_nx && !disable_nx)
++ if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
+ __supported_pte_mask |= _PAGE_NX;
+ else
+ __supported_pte_mask &= ~_PAGE_NX;
+@@ -39,7 +39,7 @@ void x86_configure_nx(void)
+
+ void __init x86_report_nx(void)
+ {
+- if (!cpu_has_nx) {
++ if (!boot_cpu_has(X86_FEATURE_NX)) {
+ printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
+ "missing in CPU!\n");
+ } else {
+diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
+index 0c98a9d51a24..44ce80606944 100644
+--- a/drivers/char/hw_random/via-rng.c
++++ b/drivers/char/hw_random/via-rng.c
+@@ -140,7 +140,7 @@ static int via_rng_init(struct hwrng *rng)
+ * RNG configuration like it used to be the case in this
+ * register */
+ if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
+- if (!cpu_has_xstore_enabled) {
++ if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
+ pr_err(PFX "can't enable hardware RNG "
+ "if XSTORE is not enabled\n");
+ return -ENODEV;
+@@ -200,8 +200,9 @@ static int __init mod_init(void)
+ {
+ int err;
+
+- if (!cpu_has_xstore)
++ if (!boot_cpu_has(X86_FEATURE_XSTORE))
+ return -ENODEV;
++
+ pr_info("VIA RNG detected\n");
+ err = hwrng_register(&via_rng);
+ if (err) {
+diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
+index da2d6777bd09..97a364694bfc 100644
+--- a/drivers/crypto/padlock-aes.c
++++ b/drivers/crypto/padlock-aes.c
+@@ -515,7 +515,7 @@ static int __init padlock_init(void)
+ if (!x86_match_cpu(padlock_cpu_id))
+ return -ENODEV;
+
+- if (!cpu_has_xcrypt_enabled) {
++ if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
+ printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+ return -ENODEV;
+ }
+diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
+index 4e154c9b9206..8c5f90647b7a 100644
+--- a/drivers/crypto/padlock-sha.c
++++ b/drivers/crypto/padlock-sha.c
+@@ -540,7 +540,7 @@ static int __init padlock_init(void)
+ struct shash_alg *sha1;
+ struct shash_alg *sha256;
+
+- if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
++ if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
+ return -ENODEV;
+
+ /* Register the newly added algorithm module if on *
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index e9b241b1c9dd..ac596928f6b4 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -753,7 +753,7 @@ static inline void set_irq_posting_cap(void)
+ * should have X86_FEATURE_CX16 support, this has been confirmed
+ * with Intel hardware guys.
+ */
+- if ( cpu_has_cx16 )
++ if (boot_cpu_has(X86_FEATURE_CX16))
+ intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
+
+ for_each_iommu(iommu, drhd)
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 208b3f5ffb3f..7efd70bfeaf7 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -923,7 +923,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
+ if (bio_flags & EXTENT_BIO_TREE_LOG)
+ return 0;
+ #ifdef CONFIG_X86
+- if (cpu_has_xmm4_2)
++ if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
+ return 0;
+ #endif
+ return 1;
+--
+2.17.1
+
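The patch above is one mechanical substitution repeated across the tree: every removed cpu_has_xx wrapper was merely an alias for a boot_cpu_has() test, so each call site converts as in this illustrative excerpt (mirroring the mtrr hunk):

    /* Before: per-feature convenience macro, now removed. */
    if (cpu_has_mtrr)
            get_fixed_ranges(mtrr_state.fixed_ranges);

    /*
     * After: open-coded feature test. Per the comment the patch adds,
     * use static_cpu_has_safe() on fast paths, boot_cpu_has() otherwise.
     */
    if (boot_cpu_has(X86_FEATURE_MTRR))
            get_fixed_ranges(mtrr_state.fixed_ranges);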
diff --git a/patches.suse/0003-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch b/patches.kernel.org/4.4.138-004-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch
index bc311a805f..1bc9ed8028 100644
--- a/patches.suse/0003-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch
+++ b/patches.kernel.org/4.4.138-004-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch
@@ -1,9 +1,11 @@
From: yu-cheng yu <yu-cheng.yu@intel.com>
Date: Wed, 6 Jan 2016 14:24:53 -0800
-Subject: x86/fpu: Disable MPX when eagerfpu is off
+Subject: [PATCH] x86/fpu: Disable MPX when eagerfpu is off
+Patch-mainline: 4.4.138
+References: CVE-2018-3665 bnc#1012382 bnc#1087086
Git-commit: a5fe93a549c54838063d2952dd9643b0b18aa67f
-Patch-mainline: v4.5-rc1
-References: bnc#1087086 CVE-2018-3665
+
+commit a5fe93a549c54838063d2952dd9643b0b18aa67f upstream.
This issue is a fallout from the command-line parsing move.
@@ -30,13 +32,16 @@ Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: yu-cheng yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/1452119094-7252-4-git-send-email-yu-cheng.yu@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
---
- arch/x86/include/asm/fpu/internal.h | 1
- arch/x86/kernel/fpu/init.c | 56 ++++++++++++++++++++++++++++--------
- arch/x86/kernel/fpu/xstate.c | 3 -
+ arch/x86/include/asm/fpu/internal.h | 1 +
+ arch/x86/kernel/fpu/init.c | 56 ++++++++++++++++++++++-------
+ arch/x86/kernel/fpu/xstate.c | 3 +-
3 files changed, 46 insertions(+), 14 deletions(-)
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 3c3550c3a4a3..6b07a842f3aa 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -42,6 +42,7 @@ extern void fpu__init_cpu_xstate(void);
@@ -47,12 +52,15 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
/*
* Debugging facility:
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 72577ad8c1fb..679a04639d0f 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
-@@ -275,7 +275,45 @@ static void __init fpu__init_system_xsta
+@@ -265,8 +265,46 @@ static void __init fpu__init_system_xstate_size_legacy(void)
+ */
static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
- /*
++/*
+ * Find supported xfeatures based on cpu features and command-line input.
+ * This must be called after fpu__init_parse_early_param() is called and
+ * xfeatures_mask is enumerated.
@@ -81,7 +89,7 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+ setup_clear_cpu_cap(X86_FEATURE_MPX);
+}
+
-+/*
+ /*
* Pick the FPU context switching strategy:
+ *
+ * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
@@ -95,12 +103,12 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
*/
static void __init fpu__init_system_ctx_switch(void)
{
-@@ -287,19 +325,11 @@ static void __init fpu__init_system_ctx_
+@@ -278,19 +316,11 @@ static void __init fpu__init_system_ctx_switch(void)
WARN_ON_FPU(current->thread.fpu.fpstate_active);
current_thread_info()->status = 0;
- /* Auto enable eagerfpu for xsaveopt */
- if (cpu_has_xsaveopt && eagerfpu != DISABLE)
+ if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
eagerfpu = ENABLE;
- if (xfeatures_mask & XFEATURE_MASK_EAGER) {
@@ -117,7 +125,7 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
if (eagerfpu == ENABLE)
setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
-@@ -317,10 +347,12 @@ static void __init fpu__init_parse_early
+@@ -308,10 +338,12 @@ static void __init fpu__init_parse_early_param(void)
* No need to check "eagerfpu=auto" again, since it is the
* initial default.
*/
@@ -132,9 +140,11 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
if (cmdline_find_option_bool(boot_command_line, "no387"))
setup_clear_cpu_cap(X86_FEATURE_FPU);
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 70fc312221fc..3fa200ecca62 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
-@@ -633,8 +633,7 @@ void __init fpu__init_system_xstate(void
+@@ -632,8 +632,7 @@ void __init fpu__init_system_xstate(void)
BUG();
}
@@ -144,3 +154,6 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
/* Enable xstate instructions to be able to continue with initialization: */
fpu__init_cpu_xstate();
+--
+2.17.1
+
diff --git a/patches.kernel.org/4.4.138-005-x86-fpu-Disable-AVX-when-eagerfpu-is-off.patch b/patches.kernel.org/4.4.138-005-x86-fpu-Disable-AVX-when-eagerfpu-is-off.patch
new file mode 100644
index 0000000000..19ad41ea5f
--- /dev/null
+++ b/patches.kernel.org/4.4.138-005-x86-fpu-Disable-AVX-when-eagerfpu-is-off.patch
@@ -0,0 +1,107 @@
+From: yu-cheng yu <yu-cheng.yu@intel.com>
+Date: Wed, 6 Jan 2016 14:24:54 -0800
+Subject: [PATCH] x86/fpu: Disable AVX when eagerfpu is off
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 394db20ca240741a08d472173db13d6f6a6e5a28
+
+commit 394db20ca240741a08d472173db13d6f6a6e5a28 upstream.
+
+When "eagerfpu=off" is given as a command-line input, the kernel
+should disable AVX support.
+
+The Task Switched bit used for lazy context switching does not
+support AVX. If AVX is enabled without eagerfpu context
+switching, one task's AVX state could become corrupted or leak
+to other tasks. This is a bug and has bad security implications.
+
+This only affects systems that have AVX/AVX2/AVX512 and this
+issue will be found only when one actually uses AVX/AVX2/AVX512
+_AND_ does eagerfpu=off.
+
+Reference: Intel Software Developer's Manual Vol. 3A
+
+Sec. 2.5 Control Registers:
+TS Task Switched bit (bit 3 of CR0) -- Allows the saving of the
+x87 FPU/ MMX/SSE/SSE2/SSE3/SSSE3/SSE4 context on a task switch
+to be delayed until an x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4
+instruction is actually executed by the new task.
+
+Sec. 13.4.1 Using the TS Flag to Control the Saving of the X87
+FPU and SSE State
+When the TS flag is set, the processor monitors the instruction
+stream for x87 FPU, MMX, SSE instructions. When the processor
+detects one of these instructions, it raises a
+device-not-available exeception (#NM) prior to executing the
+instruction.
+
+Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/1452119094-7252-5-git-send-email-yu-cheng.yu@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/fpu/xstate.h | 11 ++++++-----
+ arch/x86/kernel/fpu/init.c | 6 ++++++
+ 2 files changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
+index 3a6c89b70307..af30fdeb140d 100644
+--- a/arch/x86/include/asm/fpu/xstate.h
++++ b/arch/x86/include/asm/fpu/xstate.h
+@@ -20,15 +20,16 @@
+
+ /* Supported features which support lazy state saving */
+ #define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \
+- XFEATURE_MASK_SSE | \
++ XFEATURE_MASK_SSE)
++
++/* Supported features which require eager state saving */
++#define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | \
++ XFEATURE_MASK_BNDCSR | \
+ XFEATURE_MASK_YMM | \
+- XFEATURE_MASK_OPMASK | \
++ XFEATURE_MASK_OPMASK | \
+ XFEATURE_MASK_ZMM_Hi256 | \
+ XFEATURE_MASK_Hi16_ZMM)
+
+-/* Supported features which require eager state saving */
+-#define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
+-
+ /* All currently supported features */
+ #define XCNTXT_MASK (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
+
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 679a04639d0f..81a8732b94f3 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -292,6 +292,12 @@ u64 __init fpu__get_supported_xfeatures_mask(void)
+ static void __init fpu__clear_eager_fpu_features(void)
+ {
+ setup_clear_cpu_cap(X86_FEATURE_MPX);
++ setup_clear_cpu_cap(X86_FEATURE_AVX);
++ setup_clear_cpu_cap(X86_FEATURE_AVX2);
++ setup_clear_cpu_cap(X86_FEATURE_AVX512F);
++ setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
++ setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
++ setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+ }
+
+ /*
+--
+2.17.1
+
diff --git a/patches.suse/0005-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch b/patches.kernel.org/4.4.138-006-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch
index 06af0e7c9a..11276eb822 100644
--- a/patches.suse/0005-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch
+++ b/patches.kernel.org/4.4.138-006-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch
@@ -1,9 +1,11 @@
From: Andy Lutomirski <luto@kernel.org>
Date: Sun, 24 Jan 2016 14:38:10 -0800
-Subject: x86/fpu: Default eagerfpu=on on all CPUs
+Subject: [PATCH] x86/fpu: Default eagerfpu=on on all CPUs
+Patch-mainline: 4.4.138
+References: CVE-2018-3665 bnc#1012382 bnc#1087086
Git-commit: 58122bf1d856a4ea9581d62a07c557d997d46a19
-Patch-mainline: v4.6-rc1
-References: bnc#1087086 CVE-2018-3665
+
+commit 58122bf1d856a4ea9581d62a07c557d997d46a19 upstream.
We have eager and lazy FPU modes, introduced in:
@@ -40,14 +42,17 @@ Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: yu-cheng yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/ac290de61bf08d9cfc2664a4f5080257ffc1075a.1453675014.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
---
- arch/x86/kernel/fpu/init.c | 13 +++++--------
+ arch/x86/kernel/fpu/init.c | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 81a8732b94f3..cefa5b510fc5 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
-@@ -263,7 +263,10 @@ static void __init fpu__init_system_xsta
+@@ -252,7 +252,10 @@ static void __init fpu__init_system_xstate_size_legacy(void)
* not only saved the restores along the way, but we also have the
* FPU ready to be used for the original task.
*
@@ -59,7 +64,7 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
* state during every context switch, regardless of whether the task
* has used FPU instructions in that time slice or not. This is done
* because modern FPU context saving instructions are able to optimize
-@@ -274,7 +277,7 @@ static void __init fpu__init_system_xsta
+@@ -263,7 +266,7 @@ static void __init fpu__init_system_xstate_size_legacy(void)
* to use 'eager' restores, if we detect that a task is using the FPU
* frequently. See the fpu->counter logic in fpu/internal.h for that. ]
*/
@@ -68,7 +73,7 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
/*
* Find supported xfeatures based on cpu features and command-line input.
-@@ -345,15 +348,9 @@ static void __init fpu__init_system_ctx_
+@@ -340,15 +343,9 @@ static void __init fpu__init_system_ctx_switch(void)
*/
static void __init fpu__init_parse_early_param(void)
{
@@ -84,3 +89,6 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
}
if (cmdline_find_option_bool(boot_command_line, "no387"))
+--
+2.17.1
+
diff --git a/patches.suse/0001-x86-fpu-Fix-no387-regression.patch b/patches.kernel.org/4.4.138-007-x86-fpu-Fix-no387-regression.patch
index f34da85b97..c2ea42d8b5 100644
--- a/patches.suse/0001-x86-fpu-Fix-no387-regression.patch
+++ b/patches.kernel.org/4.4.138-007-x86-fpu-Fix-no387-regression.patch
@@ -1,9 +1,11 @@
From: Andy Lutomirski <luto@kernel.org>
Date: Thu, 21 Jan 2016 15:24:31 -0800
-Subject: x86/fpu: Fix 'no387' regression
+Subject: [PATCH] x86/fpu: Fix 'no387' regression
+Patch-mainline: 4.4.138
+References: CVE-2018-3665 bnc#1012382 bnc#1087086
Git-commit: f363938c70a04e6bc99023a5e0c44ef7879b903f
-Patch-mainline: v4.5
-References: bnc#1087086 CVE-2018-3665
+
+commit f363938c70a04e6bc99023a5e0c44ef7879b903f upstream.
After fixing FPU option parsing, we now parse the 'no387' boot option
too early: no387 clears X86_FEATURE_FPU before it's even probed, so
@@ -31,14 +33,17 @@ Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: yu-cheng yu <yu-cheng.yu@intel.com>
Fixes: 4f81cbafcce2 ("x86/fpu: Fix early FPU command-line parsing")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
---
- arch/x86/kernel/fpu/init.c | 14 ++++++++------
+ arch/x86/kernel/fpu/init.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index cefa5b510fc5..a2b1b67758e6 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
-@@ -78,13 +78,15 @@ static void fpu__init_system_early_gener
+@@ -78,13 +78,15 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
write_cr0(cr0);
@@ -60,3 +65,6 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
#ifndef CONFIG_MATH_EMULATION
if (!cpu_has_fpu) {
+--
+2.17.1
+
diff --git a/patches.kernel.org/4.4.138-008-x86-fpu-Revert-x86-fpu-Disable-AVX-when-eager.patch b/patches.kernel.org/4.4.138-008-x86-fpu-Revert-x86-fpu-Disable-AVX-when-eager.patch
new file mode 100644
index 0000000000..b5606acd2d
--- /dev/null
+++ b/patches.kernel.org/4.4.138-008-x86-fpu-Revert-x86-fpu-Disable-AVX-when-eager.patch
@@ -0,0 +1,100 @@
+From: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Date: Wed, 9 Mar 2016 16:28:54 -0800
+Subject: [PATCH] x86/fpu: Revert ("x86/fpu: Disable AVX when eagerfpu is off")
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: a65050c6f17e52442716138d48d0a47301a8344b
+
+commit a65050c6f17e52442716138d48d0a47301a8344b upstream.
+
+Leonid Shatz noticed that the SDM interpretation of the following
+recent commit:
+
+ 394db20ca240741 ("x86/fpu: Disable AVX when eagerfpu is off")
+
+... is incorrect and that the original behavior of the FPU code was correct.
+
+Because AVX is not stated in CR0 TS bit description, it was mistakenly
+believed to be not supported for lazy context switch. This turns out
+to be false:
+
+ Intel Software Developer's Manual Vol. 3A, Sec. 2.5 Control Registers:
+
+ 'TS Task Switched bit (bit 3 of CR0) -- Allows the saving of the x87 FPU/
+ MMX/SSE/SSE2/SSE3/SSSE3/SSE4 context on a task switch to be delayed until
+ an x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instruction is actually executed
+ by the new task.'
+
+ Intel Software Developer's Manual Vol. 2A, Sec. 2.4 Instruction Exception
+ Specification:
+
+ 'AVX instructions refer to exceptions by classes that include #NM
+ "Device Not Available" exception for lazy context switch.'
+
+So revert the commit.
+
+Reported-by: Leonid Shatz <leonid.shatz@ravellosystems.com>
+Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1457569734-3785-1-git-send-email-yu-cheng.yu@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/fpu/xstate.h | 9 ++++-----
+ arch/x86/kernel/fpu/init.c | 6 ------
+ 2 files changed, 4 insertions(+), 11 deletions(-)
+
+diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
+index af30fdeb140d..f23cd8c80b1c 100644
+--- a/arch/x86/include/asm/fpu/xstate.h
++++ b/arch/x86/include/asm/fpu/xstate.h
+@@ -20,16 +20,15 @@
+
+ /* Supported features which support lazy state saving */
+ #define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \
+- XFEATURE_MASK_SSE)
+-
+-/* Supported features which require eager state saving */
+-#define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | \
+- XFEATURE_MASK_BNDCSR | \
++ XFEATURE_MASK_SSE | \
+ XFEATURE_MASK_YMM | \
+ XFEATURE_MASK_OPMASK | \
+ XFEATURE_MASK_ZMM_Hi256 | \
+ XFEATURE_MASK_Hi16_ZMM)
+
++/* Supported features which require eager state saving */
++#define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
++
+ /* All currently supported features */
+ #define XCNTXT_MASK (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
+
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index a2b1b67758e6..c3fd07a9621a 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -297,12 +297,6 @@ u64 __init fpu__get_supported_xfeatures_mask(void)
+ static void __init fpu__clear_eager_fpu_features(void)
+ {
+ setup_clear_cpu_cap(X86_FEATURE_MPX);
+- setup_clear_cpu_cap(X86_FEATURE_AVX);
+- setup_clear_cpu_cap(X86_FEATURE_AVX2);
+- setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+- setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
+- setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
+- setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+ }
+
+ /*
+--
+2.17.1
+
diff --git a/patches.suse/0001-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-machine.patch b/patches.kernel.org/4.4.138-009-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-.patch
index c7664c15d1..8b3ae86d23 100644
--- a/patches.suse/0001-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-machine.patch
+++ b/patches.kernel.org/4.4.138-009-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-.patch
@@ -1,9 +1,11 @@
From: Borislav Petkov <bp@alien8.de>
Date: Fri, 11 Mar 2016 12:32:06 +0100
-Subject: x86/fpu: Fix eager-FPU handling on legacy FPU machines
+Subject: [PATCH] x86/fpu: Fix eager-FPU handling on legacy FPU machines
+Patch-mainline: 4.4.138
+References: CVE-2018-3665 bnc#1012382 bnc#1087086
Git-commit: 6e6867093de35141f0a76b66ac13f9f2e2c8e77a
-Patch-mainline: v4.5
-References: bnc#1087086 CVE-2018-3665
+
+commit 6e6867093de35141f0a76b66ac13f9f2e2c8e77a upstream.
i486 derived cores like Intel Quark support only the very old,
legacy x87 FPU (FSAVE/FRSTOR, CPUID bit FXSR is not set), and
@@ -46,15 +48,18 @@ Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu-cheng <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/20160311113206.GD4312@pd.tnic
Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
---
- arch/x86/kernel/fpu/core.c | 4 +++-
- arch/x86/kernel/fpu/init.c | 2 +-
+ arch/x86/kernel/fpu/core.c | 4 +++-
+ arch/x86/kernel/fpu/init.c | 2 +-
2 files changed, 4 insertions(+), 2 deletions(-)
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index d25097c3fc1d..d5804adfa6da 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
-@@ -409,8 +409,10 @@ static inline void copy_init_fpstate_to_
+@@ -409,8 +409,10 @@ static inline void copy_init_fpstate_to_fpregs(void)
{
if (use_xsave())
copy_kernel_to_xregs(&init_fpstate.xsave, -1);
@@ -66,9 +71,11 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
}
/*
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index c3fd07a9621a..02a2b73e3f6b 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
-@@ -135,7 +135,7 @@ static void __init fpu__init_system_gene
+@@ -135,7 +135,7 @@ static void __init fpu__init_system_generic(void)
* Set up the legacy init FPU context. (xstate init might overwrite this
* with a more modern format, if the CPU supports it.)
*/
@@ -77,3 +84,6 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
fpu__init_system_mxcsr();
}
+--
+2.17.1
+
diff --git a/patches.kernel.org/4.4.138-010-x86-fpu-Hard-disable-lazy-FPU-mode.patch b/patches.kernel.org/4.4.138-010-x86-fpu-Hard-disable-lazy-FPU-mode.patch
new file mode 100644
index 0000000000..49612636d3
--- /dev/null
+++ b/patches.kernel.org/4.4.138-010-x86-fpu-Hard-disable-lazy-FPU-mode.patch
@@ -0,0 +1,206 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 4 Oct 2016 20:34:31 -0400
+Subject: [PATCH] x86/fpu: Hard-disable lazy FPU mode
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: ca6938a1cd8a1c5e861a99b67f84ac166fc2b9e7
+
+commit ca6938a1cd8a1c5e861a99b67f84ac166fc2b9e7 upstream.
+
+Since commit:
+
+ 58122bf1d856 ("x86/fpu: Default eagerfpu=on on all CPUs")
+
+... in Linux 4.6, eager FPU mode has been the default on all x86
+systems, and no one has reported any regressions.
+
+This patch removes the ability to enable lazy mode: use_eager_fpu()
+becomes "return true" and all of the FPU mode selection machinery is
+removed.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Rik van Riel <riel@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: pbonzini@redhat.com
+Link: http://lkml.kernel.org/r/1475627678-20788-3-git-send-email-riel@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/cpufeature.h | 2 +-
+ arch/x86/include/asm/fpu/internal.h | 2 +-
+ arch/x86/kernel/fpu/init.c | 91 +----------------------------
+ 3 files changed, 5 insertions(+), 90 deletions(-)
+
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index dd7578e0fe0e..232621c5e859 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -104,7 +104,7 @@
+ #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
+ #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
+ #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
+-#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
++/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
+ #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+
+ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 6b07a842f3aa..84de3d4f675c 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -58,7 +58,7 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
+ */
+ static __always_inline __pure bool use_eager_fpu(void)
+ {
+- return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
++ return true;
+ }
+
+ static __always_inline __pure bool use_xsaveopt(void)
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 02a2b73e3f6b..954517285fa2 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -15,10 +15,7 @@
+ */
+ static void fpu__init_cpu_ctx_switch(void)
+ {
+- if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
+- stts();
+- else
+- clts();
++ clts();
+ }
+
+ /*
+@@ -234,42 +231,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
+ setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+ }
+
+-/*
+- * FPU context switching strategies:
+- *
+- * Against popular belief, we don't do lazy FPU saves, due to the
+- * task migration complications it brings on SMP - we only do
+- * lazy FPU restores.
+- *
+- * 'lazy' is the traditional strategy, which is based on setting
+- * CR0::TS to 1 during context-switch (instead of doing a full
+- * restore of the FPU state), which causes the first FPU instruction
+- * after the context switch (whenever it is executed) to fault - at
+- * which point we lazily restore the FPU state into FPU registers.
+- *
+- * Tasks are of course under no obligation to execute FPU instructions,
+- * so it can easily happen that another context-switch occurs without
+- * a single FPU instruction being executed. If we eventually switch
+- * back to the original task (that still owns the FPU) then we have
+- * not only saved the restores along the way, but we also have the
+- * FPU ready to be used for the original task.
+- *
+- * 'lazy' is deprecated because it's almost never a performance win
+- * and it's much more complicated than 'eager'.
+- *
+- * 'eager' switching is by default on all CPUs, there we switch the FPU
+- * state during every context switch, regardless of whether the task
+- * has used FPU instructions in that time slice or not. This is done
+- * because modern FPU context saving instructions are able to optimize
+- * state saving and restoration in hardware: they can detect both
+- * unused and untouched FPU state and optimize accordingly.
+- *
+- * [ Note that even in 'lazy' mode we might optimize context switches
+- * to use 'eager' restores, if we detect that a task is using the FPU
+- * frequently. See the fpu->counter logic in fpu/internal.h for that. ]
+- */
+-static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
+-
+ /*
+ * Find supported xfeatures based on cpu features and command-line input.
+ * This must be called after fpu__init_parse_early_param() is called and
+@@ -277,40 +238,10 @@ static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
+ */
+ u64 __init fpu__get_supported_xfeatures_mask(void)
+ {
+- /* Support all xfeatures known to us */
+- if (eagerfpu != DISABLE)
+- return XCNTXT_MASK;
+-
+- /* Warning of xfeatures being disabled for no eagerfpu mode */
+- if (xfeatures_mask & XFEATURE_MASK_EAGER) {
+- pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
+- xfeatures_mask & XFEATURE_MASK_EAGER);
+- }
+-
+- /* Return a mask that masks out all features requiring eagerfpu mode */
+- return ~XFEATURE_MASK_EAGER;
+-}
+-
+-/*
+- * Disable features dependent on eagerfpu.
+- */
+-static void __init fpu__clear_eager_fpu_features(void)
+-{
+- setup_clear_cpu_cap(X86_FEATURE_MPX);
++ return XCNTXT_MASK;
+ }
+
+-/*
+- * Pick the FPU context switching strategy:
+- *
+- * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
+- * the following is true:
+- *
+- * (1) the cpu has xsaveopt, as it has the optimization and doing eager
+- * FPU switching has a relatively low cost compared to a plain xsave;
+- * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
+- * switching. Should the kernel boot with noxsaveopt, we support MPX
+- * with eager FPU switching at a higher cost.
+- */
++/* Legacy code to initialize eager fpu mode. */
+ static void __init fpu__init_system_ctx_switch(void)
+ {
+ static bool on_boot_cpu = 1;
+@@ -320,17 +251,6 @@ static void __init fpu__init_system_ctx_switch(void)
+
+ WARN_ON_FPU(current->thread.fpu.fpstate_active);
+ current_thread_info()->status = 0;
+-
+- if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
+- eagerfpu = ENABLE;
+-
+- if (xfeatures_mask & XFEATURE_MASK_EAGER)
+- eagerfpu = ENABLE;
+-
+- if (eagerfpu == ENABLE)
+- setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+-
+- printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
+ }
+
+ /*
+@@ -339,11 +259,6 @@ static void __init fpu__init_system_ctx_switch(void)
+ */
+ static void __init fpu__init_parse_early_param(void)
+ {
+- if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
+- eagerfpu = DISABLE;
+- fpu__clear_eager_fpu_features();
+- }
+-
+ if (cmdline_find_option_bool(boot_command_line, "no387"))
+ setup_clear_cpu_cap(X86_FEATURE_FPU);
+
+--
+2.17.1
+
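The comment block deleted above is the only prose description of the lazy/eager distinction in this file; as a reminder, here is a rough standalone sketch of the two strategies. Everything in it (struct task, fpregs_save/fpregs_load, cr0_ts) is an illustrative stand-in, not a kernel symbol - a schematic model, not the kernel's actual switch path.

#include <stdio.h>

struct task { const char *name; };

static int cr0_ts;              /* models the CR0::TS trap bit */

static void fpregs_save(struct task *t) { printf("save %s\n", t->name); }
static void fpregs_load(struct task *t) { printf("load %s\n", t->name); }

/* 'lazy': arm TS and defer the restore until a #NM fault proves the
 * incoming task really executes an FPU instruction. */
static void switch_lazy(struct task *prev, struct task *next)
{
	fpregs_save(prev);
	cr0_ts = 1;             /* next's first FPU insn will fault */
	(void)next;
}

/* 'eager': save and restore on every switch; XSAVEOPT-class hardware
 * makes this cheap for unused or untouched state. */
static void switch_eager(struct task *prev, struct task *next)
{
	fpregs_save(prev);
	fpregs_load(next);
	cr0_ts = 0;
}

int main(void)
{
	struct task a = { "a" }, b = { "b" };

	switch_lazy(&a, &b);
	switch_eager(&b, &a);
	printf("TS=%d\n", cr0_ts);
	return 0;
}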
diff --git a/patches.suse/0002-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch b/patches.kernel.org/4.4.138-011-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch
index ed942b3288..c0ca5635c3 100644
--- a/patches.suse/0002-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch
+++ b/patches.kernel.org/4.4.138-011-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch
@@ -1,9 +1,11 @@
From: Andy Lutomirski <luto@kernel.org>
Date: Sun, 24 Jan 2016 14:38:07 -0800
-Subject: x86/fpu: Fix FNSAVE usage in eagerfpu mode
+Subject: [PATCH] x86/fpu: Fix FNSAVE usage in eagerfpu mode
+Patch-mainline: 4.4.138
+References: CVE-2018-3665 bnc#1012382 bnc#1087086
Git-commit: 5ed73f40735c68d8a656b46d09b1885d3b8740ae
-Patch-mainline: v4.6-rc1
-References: bnc#1087086 CVE-2018-3665
+
+commit 5ed73f40735c68d8a656b46d09b1885d3b8740ae upstream.
In eager fpu mode, having deactivated FPU without immediately
reloading some other context is illegal. Therefore, to recover from
@@ -31,11 +33,14 @@ Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: yu-cheng yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/60662444e13c76f06e23c15c5dcdba31b4ac3d67.1453675014.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
---
- arch/x86/kernel/fpu/core.c | 18 +++++++++++++++---
+ arch/x86/kernel/fpu/core.c | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index d5804adfa6da..f9734186a57e 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -114,6 +114,10 @@ void __kernel_fpu_begin(void)
@@ -64,7 +69,7 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
}
preempt_enable();
}
-@@ -259,7 +267,11 @@ static void fpu_copy(struct fpu *dst_fpu
+@@ -259,7 +267,11 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
preempt_disable();
if (!copy_fpregs_to_fpstate(dst_fpu)) {
memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
@@ -77,3 +82,6 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
}
preempt_enable();
}
+--
+2.17.1
+
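The reload added by the hunks above can be modeled in a few lines. This is a hedged userspace sketch, not kernel code: save_fpregs() and reload_fpregs() stand in for copy_fpregs_to_fpstate() and copy_kernel_to_fpregs(), and regs_valid models whether the live FPU registers still hold the task's state.

#include <stdio.h>

static int regs_valid = 1;      /* models the live FPU register state */

/* FXSAVE/XSAVE leave the registers intact (return 1); FNSAVE
 * re-initializes the FPU as a side effect of saving, so it returns 0
 * to say the live state is gone. */
static int save_fpregs(int have_fxsave)
{
	if (!have_fxsave)
		regs_valid = 0;
	return have_fxsave;
}

static void reload_fpregs(void)
{
	regs_valid = 1;
}

int main(void)
{
	/* Eager-mode invariant: the registers always hold live state,
	 * so an FNSAVE-style save must be followed by an immediate
	 * reload - that reload is what this patch adds. */
	if (!save_fpregs(0 /* FNSAVE-only CPU */))
		reload_fpregs();
	printf("regs_valid=%d\n", regs_valid);
	return 0;
}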
diff --git a/patches.suse/0001-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch b/patches.kernel.org/4.4.138-012-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch
index 1b32da6b61..55bd17e343 100644
--- a/patches.suse/0001-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch
+++ b/patches.kernel.org/4.4.138-012-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch
@@ -1,9 +1,11 @@
From: Andy Lutomirski <luto@kernel.org>
Date: Sun, 24 Jan 2016 14:38:06 -0800
-Subject: x86/fpu: Fix math emulation in eager fpu mode
+Subject: [PATCH] x86/fpu: Fix math emulation in eager fpu mode
+Patch-mainline: 4.4.138
+References: CVE-2018-3665 bnc#1012382 bnc#1087086
Git-commit: 4ecd16ec7059390b430af34bd8bc3ca2b5dcef9a
-Patch-mainline: v4.6-rc1
-References: bnc#1087086 CVE-2018-3665
+
+commit 4ecd16ec7059390b430af34bd8bc3ca2b5dcef9a upstream.
Systems without an FPU are generally old and therefore use lazy FPU
switching. Unsurprisingly, math emulation in eager FPU mode is a
@@ -29,16 +31,19 @@ Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: yu-cheng yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/b4b8d112436bd6fab866e1b4011131507e8d7fbe.1453675014.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
---
- arch/x86/include/asm/fpu/internal.h | 3 ++-
- arch/x86/kernel/fpu/core.c | 2 +-
- arch/x86/kernel/traps.c | 1 -
+ arch/x86/include/asm/fpu/internal.h | 3 ++-
+ arch/x86/kernel/fpu/core.c | 2 +-
+ arch/x86/kernel/traps.c | 1 -
3 files changed, 3 insertions(+), 3 deletions(-)
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 84de3d4f675c..146d838e6ee7 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -596,7 +596,8 @@ switch_fpu_prepare(struct fpu *old_fpu,
+@@ -596,7 +596,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
* If the task has used the math, pre-load the FPU on xsave processors
* or if the past 5 consecutive context-switches used math.
*/
@@ -48,9 +53,11 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
(use_eager_fpu() || new_fpu->counter > 5);
if (old_fpu->fpregs_active) {
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index f9734186a57e..6aa0b519c851 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
-@@ -425,7 +425,7 @@ void fpu__clear(struct fpu *fpu)
+@@ -437,7 +437,7 @@ void fpu__clear(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
@@ -59,6 +66,8 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
/* FPU state will be reallocated lazily at the first use. */
fpu__drop(fpu);
} else {
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 1fbd2631be60..8c73bf1492b8 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -751,7 +751,6 @@ dotraplinkage void
@@ -69,3 +78,6 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
#ifdef CONFIG_MATH_EMULATION
if (read_cr0() & X86_CR0_EM) {
+--
+2.17.1
+
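The key hunk above narrows the preload condition in switch_fpu_prepare(). A minimal standalone model of the new condition (names shortened, plain ints for the flags; an illustration, not the kernel function):

#include <stdio.h>

/* A task with no FPU state allocated (math-emulated) is never
 * preloaded, even when eager switching would otherwise preload it. */
static int fpu_want_preload(int fpstate_active, int eager, int counter)
{
	return fpstate_active && (eager || counter > 5);
}

int main(void)
{
	printf("emulated task, eager: %d\n", fpu_want_preload(0, 1, 9));
	printf("normal task, eager:   %d\n", fpu_want_preload(1, 1, 0));
	return 0;
}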
diff --git a/patches.kernel.org/4.4.138-013-af_key-Always-verify-length-of-provided-sadb_.patch b/patches.kernel.org/4.4.138-013-af_key-Always-verify-length-of-provided-sadb_.patch
new file mode 100644
index 0000000000..58f3e5937e
--- /dev/null
+++ b/patches.kernel.org/4.4.138-013-af_key-Always-verify-length-of-provided-sadb_.patch
@@ -0,0 +1,113 @@
+From: Kevin Easton <kevin@guarana.org>
+Date: Sat, 7 Apr 2018 11:40:33 -0400
+Subject: [PATCH] af_key: Always verify length of provided sadb_key
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 4b66af2d6356a00e94bcdea3e7fea324e8b5c6f4
+
+commit 4b66af2d6356a00e94bcdea3e7fea324e8b5c6f4 upstream.
+
+Key extensions (struct sadb_key) include a user-specified number of key
+bits. The kernel uses that number to determine how much key data to copy
+out of the message in pfkey_msg2xfrm_state().
+
+The length of the sadb_key message must be verified to be long enough,
+even in the case of SADB_X_AALG_NULL. Furthermore, the sadb_key_len value
+must be long enough to include both the key data and the struct sadb_key
+itself.
+
+Introduce a helper function verify_key_len(), and call it from
+parse_exthdrs() where other exthdr types are similarly checked for
+correctness.
+
+Signed-off-by: Kevin Easton <kevin@guarana.org>
+Reported-by: syzbot+5022a34ca5a3d49b84223653fab632dfb7b4cf37@syzkaller.appspotmail.com
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: Zubin Mithra <zsm@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/key/af_key.c | 45 +++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 35 insertions(+), 10 deletions(-)
+
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 15150b412930..3ba903ff2bb0 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -437,6 +437,24 @@ static int verify_address_len(const void *p)
+ return 0;
+ }
+
++static inline int sadb_key_len(const struct sadb_key *key)
++{
++ int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
++
++ return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
++ sizeof(uint64_t));
++}
++
++static int verify_key_len(const void *p)
++{
++ const struct sadb_key *key = p;
++
++ if (sadb_key_len(key) > key->sadb_key_len)
++ return -EINVAL;
++
++ return 0;
++}
++
+ static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
+ {
+ return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
+@@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
+ return -EINVAL;
+ if (ext_hdrs[ext_type-1] != NULL)
+ return -EINVAL;
+- if (ext_type == SADB_EXT_ADDRESS_SRC ||
+- ext_type == SADB_EXT_ADDRESS_DST ||
+- ext_type == SADB_EXT_ADDRESS_PROXY ||
+- ext_type == SADB_X_EXT_NAT_T_OA) {
++ switch (ext_type) {
++ case SADB_EXT_ADDRESS_SRC:
++ case SADB_EXT_ADDRESS_DST:
++ case SADB_EXT_ADDRESS_PROXY:
++ case SADB_X_EXT_NAT_T_OA:
+ if (verify_address_len(p))
+ return -EINVAL;
+- }
+- if (ext_type == SADB_X_EXT_SEC_CTX) {
++ break;
++ case SADB_X_EXT_SEC_CTX:
+ if (verify_sec_ctx_len(p))
+ return -EINVAL;
++ break;
++ case SADB_EXT_KEY_AUTH:
++ case SADB_EXT_KEY_ENCRYPT:
++ if (verify_key_len(p))
++ return -EINVAL;
++ break;
++ default:
++ break;
+ }
+ ext_hdrs[ext_type-1] = (void *) p;
+ }
+@@ -1111,14 +1138,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
+ if (key != NULL &&
+ sa->sadb_sa_auth != SADB_X_AALG_NULL &&
+- ((key->sadb_key_bits+7) / 8 == 0 ||
+- (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
++ key->sadb_key_bits == 0)
+ return ERR_PTR(-EINVAL);
+ key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
+ if (key != NULL &&
+ sa->sadb_sa_encrypt != SADB_EALG_NULL &&
+- ((key->sadb_key_bits+7) / 8 == 0 ||
+- (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
++ key->sadb_key_bits == 0)
+ return ERR_PTR(-EINVAL);
+
+ x = xfrm_state_alloc(net);
+--
+2.17.1
+
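As a worked example of the arithmetic behind verify_key_len() above: the sketch below is a standalone userspace model, not kernel code - the struct sadb_key layout is copied from the PF_KEY v2 uapi header so the file builds on its own, and DIV_ROUND_UP is the usual kernel macro.

#include <stdio.h>
#include <stdint.h>

struct sadb_key {
	uint16_t sadb_key_len;      /* total length in 64-bit words */
	uint16_t sadb_key_exttype;
	uint16_t sadb_key_bits;     /* user-supplied key size in bits */
	uint16_t sadb_key_reserved;
};

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int min_key_len(const struct sadb_key *key)
{
	int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);

	return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
			    sizeof(uint64_t));
}

int main(void)
{
	/* A 128-bit key: 16 key bytes + 8 header bytes = 24 bytes,
	 * i.e. 3 units of 64 bits, so sadb_key_len must be >= 3. */
	struct sadb_key k = { .sadb_key_len = 2, .sadb_key_bits = 128 };

	printf("need >= %d words, got %u -> %s\n", min_key_len(&k),
	       k.sadb_key_len,
	       min_key_len(&k) > k.sadb_key_len ? "-EINVAL" : "ok");
	return 0;
}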
diff --git a/patches.kernel.org/4.4.138-014-x86-crypto-x86-fpu-Remove-X86_FEATURE_EAGER_F.patch b/patches.kernel.org/4.4.138-014-x86-crypto-x86-fpu-Remove-X86_FEATURE_EAGER_F.patch
new file mode 100644
index 0000000000..22190b9e03
--- /dev/null
+++ b/patches.kernel.org/4.4.138-014-x86-crypto-x86-fpu-Remove-X86_FEATURE_EAGER_F.patch
@@ -0,0 +1,62 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 4 Oct 2016 20:34:30 -0400
+Subject: [PATCH] x86/crypto, x86/fpu: Remove X86_FEATURE_EAGER_FPU #ifdef from
+ the crc32c code
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 02f39b2379fb81557ae864ec8f85421c0250c954
+
+commit 02f39b2379fb81557ae864ec8f85421c0250c954 upstream.
+
+The crypto code was checking both use_eager_fpu() and
+defined(X86_FEATURE_EAGER_FPU). The latter was nonsensical, so
+remove it. This will avoid breakage when we remove
+X86_FEATURE_EAGER_FPU.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Rik van Riel <riel@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: pbonzini@redhat.com
+Link: http://lkml.kernel.org/r/1475627678-20788-2-git-send-email-riel@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/crypto/crc32c-intel_glue.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
+index 0e9871693f24..15f5c7675d42 100644
+--- a/arch/x86/crypto/crc32c-intel_glue.c
++++ b/arch/x86/crypto/crc32c-intel_glue.c
+@@ -58,16 +58,11 @@
+ asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
+ unsigned int crc_init);
+ static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
+-#if defined(X86_FEATURE_EAGER_FPU)
+ #define set_pcl_breakeven_point() \
+ do { \
+ if (!use_eager_fpu()) \
+ crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
+ } while (0)
+-#else
+-#define set_pcl_breakeven_point() \
+- (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU)
+-#endif
+ #endif /* CONFIG_X86_64 */
+
+ static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
+--
+2.17.1
+
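Why the removed #if defined() was nonsensical: a feature bit is a runtime property, but the macro naming it is defined wherever cpufeature.h is included, so the preprocessor test was always true and the #else branch was dead. A minimal demonstration (the macro value is copied here only so the file builds standalone):

#include <stdio.h>

#define X86_FEATURE_EAGER_FPU (3*32 + 29)   /* from cpufeature.h */

int main(void)
{
#if defined(X86_FEATURE_EAGER_FPU)
	puts("always taken: the #else branch below is dead code");
#else
	puts("never compiled in - which is why deleting it is safe");
#endif
	return 0;
}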
diff --git a/patches.kernel.org/4.4.138-015-gpio-No-NULL-owner.patch b/patches.kernel.org/4.4.138-015-gpio-No-NULL-owner.patch
new file mode 100644
index 0000000000..11f77f1715
--- /dev/null
+++ b/patches.kernel.org/4.4.138-015-gpio-No-NULL-owner.patch
@@ -0,0 +1,51 @@
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Tue, 16 Jan 2018 08:29:50 +0100
+Subject: [PATCH] gpio: No NULL owner
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 7d18f0a14aa6a0d6bad39111c1fb655f07f71d59
+
+commit 7d18f0a14aa6a0d6bad39111c1fb655f07f71d59 upstream.
+
+Sometimes a GPIO is fetched with NULL as parent device, and
+that is just fine. So under these circumstances, avoid using
+dev_name() to provide a name for the GPIO line.
+
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Cc: Daniel Rosenberg <drosen@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/gpio/gpiolib.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 759a39906a52..fe89fd56eabf 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -2117,6 +2117,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
+ struct gpio_desc *desc = NULL;
+ int status;
+ enum gpio_lookup_flags lookupflags = 0;
++ /* Maybe we have a device name, maybe not */
++ const char *devname = dev ? dev_name(dev) : "?";
+
+ dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
+
+@@ -2145,8 +2147,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
+ return desc;
+ }
+
+- /* If a connection label was passed use that, else use the device name as label */
+- status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
++ /*
++ * If a connection label was passed use that, else attempt to use
++ * the device name as label
++ */
++ status = gpiod_request(desc, con_id ? con_id : devname);
+ if (status < 0)
+ return ERR_PTR(status);
+
+--
+2.17.1
+
diff --git a/patches.kernel.org/4.4.138-016-Clarify-and-fix-MAX_LFS_FILESIZE-macros.patch b/patches.kernel.org/4.4.138-016-Clarify-and-fix-MAX_LFS_FILESIZE-macros.patch
new file mode 100644
index 0000000000..77c6a229d5
--- /dev/null
+++ b/patches.kernel.org/4.4.138-016-Clarify-and-fix-MAX_LFS_FILESIZE-macros.patch
@@ -0,0 +1,89 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 27 Aug 2017 12:12:25 -0700
+Subject: [PATCH] Clarify (and fix) MAX_LFS_FILESIZE macros
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 0cc3b0ec23ce4c69e1e890ed2b8d2fa932b14aad
+
+commit 0cc3b0ec23ce4c69e1e890ed2b8d2fa932b14aad upstream.
+
+We have a MAX_LFS_FILESIZE macro that is meant to be filled in by
+filesystems (and other IO targets) that know they are 64-bit clean and
+don't have any 32-bit limits in their IO path.
+
+It turns out that our 32-bit value for that limit was bogus. On 32-bit,
+the VM layer is limited by the page cache to only 32-bit index values,
+but our logic for that was confusing and actually wrong. We used to
+define that value to
+
+ (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
+
+which is actually odd in several ways: it limits the index to 31 bits,
+and then it limits files so that they can't have data in that last byte
+of a page that has the highest 31-bit index (ie page index 0x7fffffff).
+
+Neither of those limitations make sense. The index is actually the full
+32 bit unsigned value, and we can use that whole full page. So the
+maximum size of the file would logically be "PAGE_SIZE << BITS_PER_LONG".
+
+However, we do want to avoid the maximum index, because we have code
+that iterates over the page indexes, and we don't want that code to
+overflow. So the maximum size of a file on a 32-bit host should
+actually be one page less than the full 32-bit index.
+
+So the actual limit is ULONG_MAX << PAGE_SHIFT. That means that we will
+not actually be using the page of that last index (ULONG_MAX), but we
+can grow a file up to that limit.
+
+The wrong value of MAX_LFS_FILESIZE actually caused problems for Doug
+Nazar, who was still using a 32-bit host, but with a 9.7TB 2 x RAID5
+volume. It turns out that our old MAX_LFS_FILESIZE was 8TiB (well, one
+byte less), but the actual true VM limit is one page less than 16TiB.
+
+This was invisible until commit c2a9737f45e2 ("vfs,mm: fix a dead loop
+in truncate_inode_pages_range()"), which started applying that
+MAX_LFS_FILESIZE limit to block devices too.
+
+NOTE! On 64-bit, the page index isn't a limiter at all, and the limit is
+actually just the offset type itself (loff_t), which is signed. But for
+clarity, on 64-bit, just use the maximum signed value, and don't make
+people have to count the number of 'f' characters in the hex constant.
+
+So just use LLONG_MAX for the 64-bit case. That was what the value had
+been before too, just written out as a hex constant.
+
+Fixes: c2a9737f45e2 ("vfs,mm: fix a dead loop in truncate_inode_pages_range()")
+Reported-and-tested-by: Doug Nazar <nazard@nazar.ca>
+Cc: Andreas Dilger <adilger@dilger.ca>
+Cc: Mark Fasheh <mfasheh@versity.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Dave Kleikamp <shaggy@kernel.org>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Rafael Tinoco <rafael.tinoco@linaro.org>
+[backported to 4.4.y due to requests of failed LTP tests - gregkh]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/linux/fs.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index da79e9d66e5b..240cbaee819f 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -926,9 +926,9 @@ static inline struct file *get_file(struct file *f)
+ /* Page cache limit. The filesystems should put that into their s_maxbytes
+ limits, otherwise bad things can happen in VM. */
+ #if BITS_PER_LONG==32
+-#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
++#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
+ #elif BITS_PER_LONG==64
+-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
++#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
+ #endif
+
+ #define FL_POSIX 1
+--
+2.17.1
+
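The before/after limits can be checked with a few lines of arithmetic. The sketch below assumes a 32-bit host with 4 KiB pages (PAGE_SHIFT = 12, BITS_PER_LONG = 32), matching the commit text; it is an illustration, not kernel code.

#include <stdio.h>

int main(void)
{
	long long page = 4096;

	/* old: (PAGE_CACHE_SIZE << (BITS_PER_LONG-1)) - 1 = 8 TiB - 1 */
	long long old_limit = (page << (32 - 1)) - 1;

	/* new: ULONG_MAX << PAGE_SHIFT = 16 TiB - 4 KiB */
	long long new_limit = (long long)0xffffffffULL << 12;

	printf("old: %lld (%.4f TiB)\n", old_limit,
	       old_limit / 1099511627776.0);
	printf("new: %lld (%.4f TiB)\n", new_limit,
	       new_limit / 1099511627776.0);
	return 0;
}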
diff --git a/patches.kernel.org/4.4.138-017-KVM-x86-introduce-linear_-read-write-_system.patch b/patches.kernel.org/4.4.138-017-KVM-x86-introduce-linear_-read-write-_system.patch
new file mode 100644
index 0000000000..8c1c7fbcfd
--- /dev/null
+++ b/patches.kernel.org/4.4.138-017-KVM-x86-introduce-linear_-read-write-_system.patch
@@ -0,0 +1,190 @@
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 6 Jun 2018 16:43:02 +0200
+Subject: [PATCH] KVM: x86: introduce linear_{read,write}_system
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 79367a65743975e5cac8d24d08eccc7fdae832b0
+
+commit 79367a65743975e5cac8d24d08eccc7fdae832b0 upstream.
+
+Wrap the common invocation of ctxt->ops->read_std and ctxt->ops->write_std, so
+as to have a smaller patch when the functions grow another argument.
+
+Fixes: 129a72a0d3c8 ("KVM: x86: Introduce segmented_write_std", 2017-01-12)
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/kvm/emulate.c | 64 +++++++++++++++++++++---------------------
+ 1 file changed, 32 insertions(+), 32 deletions(-)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 8864fec63a20..c27e09ee8669 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -790,6 +790,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+ return assign_eip_near(ctxt, ctxt->_eip + rel);
+ }
+
++static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
++ void *data, unsigned size)
++{
++ return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
++}
++
++static int linear_write_system(struct x86_emulate_ctxt *ctxt,
++ ulong linear, void *data,
++ unsigned int size)
++{
++ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
++}
++
+ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+ struct segmented_address addr,
+ void *data,
+@@ -1488,8 +1501,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
+ return emulate_gp(ctxt, index << 3 | 0x2);
+
+ addr = dt.address + index * 8;
+- return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
+- &ctxt->exception);
++ return linear_read_system(ctxt, addr, desc, sizeof *desc);
+ }
+
+ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
+@@ -1552,8 +1564,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
+- &ctxt->exception);
++ return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
+ }
+
+ /* allowed just for 8 bytes segments */
+@@ -1567,8 +1578,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
+- &ctxt->exception);
++ return linear_write_system(ctxt, addr, desc, sizeof *desc);
+ }
+
+ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+@@ -1729,8 +1739,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ return ret;
+ }
+ } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
+- ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
+- sizeof(base3), &ctxt->exception);
++ ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ if (is_noncanonical_address(get_desc_base(&seg_desc) |
+@@ -2043,11 +2052,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
+ eip_addr = dt.address + (irq << 2);
+ cs_addr = dt.address + (irq << 2) + 2;
+
+- rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
++ rc = linear_read_system(ctxt, cs_addr, &cs, 2);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
++ rc = linear_read_system(ctxt, eip_addr, &eip, 2);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+@@ -3025,35 +3034,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
+ u16 tss_selector, u16 old_tss_sel,
+ ulong old_tss_base, struct desc_struct *new_desc)
+ {
+- const struct x86_emulate_ops *ops = ctxt->ops;
+ struct tss_segment_16 tss_seg;
+ int ret;
+ u32 new_tss_base = get_desc_base(new_desc);
+
+- ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+- &ctxt->exception);
++ ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ save_state_to_tss16(ctxt, &tss_seg);
+
+- ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+- &ctxt->exception);
++ ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+- ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
+- &ctxt->exception);
++ ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ if (old_tss_sel != 0xffff) {
+ tss_seg.prev_task_link = old_tss_sel;
+
+- ret = ops->write_std(ctxt, new_tss_base,
+- &tss_seg.prev_task_link,
+- sizeof tss_seg.prev_task_link,
+- &ctxt->exception);
++ ret = linear_write_system(ctxt, new_tss_base,
++ &tss_seg.prev_task_link,
++ sizeof tss_seg.prev_task_link);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ }
+@@ -3169,38 +3173,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
+ u16 tss_selector, u16 old_tss_sel,
+ ulong old_tss_base, struct desc_struct *new_desc)
+ {
+- const struct x86_emulate_ops *ops = ctxt->ops;
+ struct tss_segment_32 tss_seg;
+ int ret;
+ u32 new_tss_base = get_desc_base(new_desc);
+ u32 eip_offset = offsetof(struct tss_segment_32, eip);
+ u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
+
+- ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+- &ctxt->exception);
++ ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ save_state_to_tss32(ctxt, &tss_seg);
+
+ /* Only GP registers and segment selectors are saved */
+- ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
+- ldt_sel_offset - eip_offset, &ctxt->exception);
++ ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
++ ldt_sel_offset - eip_offset);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+- ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
+- &ctxt->exception);
++ ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ if (old_tss_sel != 0xffff) {
+ tss_seg.prev_task_link = old_tss_sel;
+
+- ret = ops->write_std(ctxt, new_tss_base,
+- &tss_seg.prev_task_link,
+- sizeof tss_seg.prev_task_link,
+- &ctxt->exception);
++ ret = linear_write_system(ctxt, new_tss_base,
++ &tss_seg.prev_task_link,
++ sizeof tss_seg.prev_task_link);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ }
+--
+2.17.1
+
diff --git a/patches.kernel.org/4.4.138-018-KVM-x86-pass-kvm_vcpu-to-kvm_read_guest_virt-.patch b/patches.kernel.org/4.4.138-018-KVM-x86-pass-kvm_vcpu-to-kvm_read_guest_virt-.patch
new file mode 100644
index 0000000000..7c71ca59b2
--- /dev/null
+++ b/patches.kernel.org/4.4.138-018-KVM-x86-pass-kvm_vcpu-to-kvm_read_guest_virt-.patch
@@ -0,0 +1,203 @@
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 6 Jun 2018 17:37:49 +0200
+Subject: [PATCH] KVM: x86: pass kvm_vcpu to kvm_read_guest_virt and
+ kvm_write_guest_virt_system
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: ce14e868a54edeb2e30cb7a7b104a2fc4b9d76ca
+
+commit ce14e868a54edeb2e30cb7a7b104a2fc4b9d76ca upstream.
+
+In the next patch the emulator's .read_std and .write_std callbacks will
+grow another argument, which is not needed in kvm_read_guest_virt and
+kvm_write_guest_virt_system's callers. Since we have to make separate
+functions, let's give the currently existing names a nicer interface, too.
+
+Fixes: 129a72a0d3c8 ("KVM: x86: Introduce segmented_write_std", 2017-01-12)
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 23 ++++++++++-------------
+ arch/x86/kvm/x86.c | 39 ++++++++++++++++++++++++++-------------
+ arch/x86/kvm/x86.h | 4 ++--
+ 3 files changed, 38 insertions(+), 28 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index a750fc7c7458..63c44a9bf6bb 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6692,8 +6692,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
+ vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
+ return 1;
+
+- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
+- sizeof(vmptr), &e)) {
++ if (kvm_read_guest_virt(vcpu, gva, &vmptr, sizeof(vmptr), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -7211,8 +7210,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ vmx_instruction_info, true, &gva))
+ return 1;
+ /* _system ok, as nested_vmx_check_permission verified cpl=0 */
+- kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
+- &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
++ kvm_write_guest_virt_system(vcpu, gva, &field_value,
++ (is_long_mode(vcpu) ? 8 : 4), NULL);
+ }
+
+ nested_vmx_succeed(vcpu);
+@@ -7247,8 +7246,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, false, &gva))
+ return 1;
+- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
+- &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
++ if (kvm_read_guest_virt(vcpu, gva, &field_value,
++ (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -7338,9 +7337,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
+ vmx_instruction_info, true, &vmcs_gva))
+ return 1;
+ /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
+- if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
+- (void *)&to_vmx(vcpu)->nested.current_vmptr,
+- sizeof(u64), &e)) {
++ if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
++ (void *)&to_vmx(vcpu)->nested.current_vmptr,
++ sizeof(u64), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -7394,8 +7393,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmx_instruction_info, false, &gva))
+ return 1;
+- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+- sizeof(operand), &e)) {
++ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -7454,8 +7452,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmx_instruction_info, false, &gva))
+ return 1;
+- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
+- sizeof(u32), &e)) {
++ if (kvm_read_guest_virt(vcpu, gva, &vpid, sizeof(u32), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 9cea09597d66..b6238ce9806d 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4245,11 +4245,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
+ return X86EMUL_CONTINUE;
+ }
+
+-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception)
+ {
+- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+@@ -4257,9 +4256,9 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+ }
+ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
+
+-static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+- gva_t addr, void *val, unsigned int bytes,
+- struct x86_exception *exception)
++static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
++ gva_t addr, void *val, unsigned int bytes,
++ struct x86_exception *exception)
+ {
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
+@@ -4274,18 +4273,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
+ return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
+ }
+
+-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+- gva_t addr, void *val,
+- unsigned int bytes,
+- struct x86_exception *exception)
++static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
++ struct kvm_vcpu *vcpu, u32 access,
++ struct x86_exception *exception)
+ {
+- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ void *data = val;
+ int r = X86EMUL_CONTINUE;
+
+ while (bytes) {
+ gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+- PFERR_WRITE_MASK,
++ access,
+ exception);
+ unsigned offset = addr & (PAGE_SIZE-1);
+ unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
+@@ -4306,6 +4303,22 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+ out:
+ return r;
+ }
++
++static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
++ unsigned int bytes, struct x86_exception *exception)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++
++ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
++ PFERR_WRITE_MASK, exception);
++}
++
++int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
++ unsigned int bytes, struct x86_exception *exception)
++{
++ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
++ PFERR_WRITE_MASK, exception);
++}
+ EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
+
+ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
+@@ -5025,8 +5038,8 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
+ static const struct x86_emulate_ops emulate_ops = {
+ .read_gpr = emulator_read_gpr,
+ .write_gpr = emulator_write_gpr,
+- .read_std = kvm_read_guest_virt_system,
+- .write_std = kvm_write_guest_virt_system,
++ .read_std = emulator_read_std,
++ .write_std = emulator_write_std,
+ .read_phys = kvm_read_guest_phys_system,
+ .fetch = kvm_fetch_guest_virt,
+ .read_emulated = emulator_read_emulated,
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index f2afa5fe48a6..53a750a10598 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -164,11 +164,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
+
+ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
+
+-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception);
+
+-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
++int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception);
+
+--
+2.17.1
+
diff --git a/patches.kernel.org/4.4.138-019-serial-samsung-fix-maxburst-parameter-for-DMA.patch b/patches.kernel.org/4.4.138-019-serial-samsung-fix-maxburst-parameter-for-DMA.patch
new file mode 100644
index 0000000000..ce3f2e1c36
--- /dev/null
+++ b/patches.kernel.org/4.4.138-019-serial-samsung-fix-maxburst-parameter-for-DMA.patch
@@ -0,0 +1,48 @@
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+Date: Thu, 10 May 2018 08:41:13 +0200
+Subject: [PATCH] serial: samsung: fix maxburst parameter for DMA transactions
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: aa2f80e752c75e593b3820f42c416ed9458fa73e
+
+commit aa2f80e752c75e593b3820f42c416ed9458fa73e upstream.
+
+The best granularity of residue that DMA engine can report is in the BURST
+units, so the serial driver must use MAXBURST = 1 and DMA_SLAVE_BUSWIDTH_1_BYTE
+if it relies on exact number of bytes transferred by DMA engine.
+
+Fixes: 62c37eedb74c ("serial: samsung: add dma reqest/release functions")
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Krzysztof Kozlowski <krzk@kernel.org>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/tty/serial/samsung.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 312343beb249..4d532a085db9 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -860,15 +860,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
+ dma->rx_conf.direction = DMA_DEV_TO_MEM;
+ dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
+- dma->rx_conf.src_maxburst = 16;
++ dma->rx_conf.src_maxburst = 1;
+
+ dma->tx_conf.direction = DMA_MEM_TO_DEV;
+ dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
+- if (dma_get_cache_alignment() >= 16)
+- dma->tx_conf.dst_maxburst = 16;
+- else
+- dma->tx_conf.dst_maxburst = 1;
++ dma->tx_conf.dst_maxburst = 1;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+--
+2.17.1
+
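To see why maxburst matters for residue accounting, a toy model (not driver code) of burst-granular progress reporting:

#include <stdio.h>

/* A DMA engine reports residue rounded to whole bursts, so byte-exact
 * accounting is only possible with a one-byte burst. */
static int reported(int transferred, int maxburst)
{
	return (transferred / maxburst) * maxburst;
}

int main(void)
{
	int rx = 23;    /* bytes actually received so far */

	printf("maxburst=16 -> engine reports %d of %d\n",
	       reported(rx, 16), rx);
	printf("maxburst=1  -> engine reports %d of %d\n",
	       reported(rx, 1), rx);
	return 0;
}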
diff --git a/patches.kernel.org/4.4.138-020-vmw_balloon-fixing-double-free-when-batching-.patch b/patches.kernel.org/4.4.138-020-vmw_balloon-fixing-double-free-when-batching-.patch
new file mode 100644
index 0000000000..99f12b639a
--- /dev/null
+++ b/patches.kernel.org/4.4.138-020-vmw_balloon-fixing-double-free-when-batching-.patch
@@ -0,0 +1,119 @@
+From: Gil Kupfer <gilkup@gmail.com>
+Date: Fri, 1 Jun 2018 00:47:47 -0700
+Subject: [PATCH] vmw_balloon: fixing double free when batching mode is off
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: b23220fe054e92f616b82450fae8cd3ab176cc60
+
+commit b23220fe054e92f616b82450fae8cd3ab176cc60 upstream.
+
+The balloon.page field is used for two different purposes, depending on
+whether batching is on or off. If batching is on, the field points to the
+page used to communicate with the hypervisor. If it is off, balloon.page
+points to the page that is about to be (un)locked.
+
+Unfortunately, this dual purpose of the field introduced a bug: when the
+balloon is popped (e.g., when the machine is reset or the balloon driver
+is explicitly removed), the balloon driver frees, unconditionally, the
+page that is held in balloon.page. As a result, if batching is
+disabled, this leads to double freeing the last page that is sent to the
+hypervisor.
+
+The following error occurs during rmmod when kernel checkers are on, and
+the balloon is not empty:
+
+[ 42.307653] ------------[ cut here ]------------
+[ 42.307657] Kernel BUG at ffffffffba1e4b28 [verbose debug info unavailable]
+[ 42.307720] invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC
+[ 42.312512] Modules linked in: vmw_vsock_vmci_transport vsock ppdev joydev vmw_balloon(-) input_leds serio_raw vmw_vmci parport_pc shpchp parport i2c_piix4 nfit mac_hid autofs4 vmwgfx drm_kms_helper hid_generic syscopyarea sysfillrect usbhid sysimgblt fb_sys_fops hid ttm mptspi scsi_transport_spi ahci mptscsih drm psmouse vmxnet3 libahci mptbase pata_acpi
+[ 42.312766] CPU: 10 PID: 1527 Comm: rmmod Not tainted 4.12.0+ #5
+[ 42.312803] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 09/30/2016
+[ 42.313042] task: ffff9bf9680f8000 task.stack: ffffbfefc1638000
+[ 42.313290] RIP: 0010:__free_pages+0x38/0x40
+[ 42.313510] RSP: 0018:ffffbfefc163be98 EFLAGS: 00010246
+[ 42.313731] RAX: 000000000000003e RBX: ffffffffc02b9720 RCX: 0000000000000006
+[ 42.313972] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff9bf97e08e0a0
+[ 42.314201] RBP: ffffbfefc163be98 R08: 0000000000000000 R09: 0000000000000000
+[ 42.314435] R10: 0000000000000000 R11: 0000000000000000 R12: ffffffffc02b97e4
+[ 42.314505] R13: ffffffffc02b9748 R14: ffffffffc02b9728 R15: 0000000000000200
+[ 42.314550] FS: 00007f3af5fec700(0000) GS:ffff9bf97e080000(0000) knlGS:0000000000000000
+[ 42.314599] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 42.314635] CR2: 00007f44f6f4ab24 CR3: 00000003a7d12000 CR4: 00000000000006e0
+[ 42.314864] Call Trace:
+[ 42.315774] vmballoon_pop+0x102/0x130 [vmw_balloon]
+[ 42.315816] vmballoon_exit+0x42/0xd64 [vmw_balloon]
+[ 42.315853] SyS_delete_module+0x1e2/0x250
+[ 42.315891] entry_SYSCALL_64_fastpath+0x23/0xc2
+[ 42.315924] RIP: 0033:0x7f3af5b0e8e7
+[ 42.315949] RSP: 002b:00007fffe6ce0148 EFLAGS: 00000206 ORIG_RAX: 00000000000000b0
+[ 42.315996] RAX: ffffffffffffffda RBX: 000055be676401e0 RCX: 00007f3af5b0e8e7
+[ 42.316951] RDX: 000000000000000a RSI: 0000000000000800 RDI: 000055be67640248
+[ 42.317887] RBP: 0000000000000003 R08: 0000000000000000 R09: 1999999999999999
+[ 42.318845] R10: 0000000000000883 R11: 0000000000000206 R12: 00007fffe6cdf130
+[ 42.319755] R13: 0000000000000000 R14: 0000000000000000 R15: 000055be676401e0
+[ 42.320606] Code: c0 74 1c f0 ff 4f 1c 74 02 5d c3 85 f6 74 07 e8 0f d8 ff ff 5d c3 31 f6 e8 c6 fb ff ff 5d c3 48 c7 c6 c8 0f c5 ba e8 58 be 02 00 <0f> 0b 66 0f 1f 44 00 00 66 66 66 66 90 48 85 ff 75 01 c3 55 48
+[ 42.323462] RIP: __free_pages+0x38/0x40 RSP: ffffbfefc163be98
+[ 42.325735] ---[ end trace 872e008e33f81508 ]---
+
+To solve the bug, we eliminate the dual purpose of balloon.page.
+
+Fixes: f220a80f0c2e ("VMware balloon: add batching to the vmw_balloon.")
+Cc: stable@vger.kernel.org
+Reported-by: Oleksandr Natalenko <onatalen@redhat.com>
+Signed-off-by: Gil Kupfer <gilkup@gmail.com>
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Reviewed-by: Xavier Deguillard <xdeguillard@vmware.com>
+Tested-by: Oleksandr Natalenko <oleksandr@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/misc/vmw_balloon.c | 23 +++++++----------------
+ 1 file changed, 7 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
+index 1e688bfec567..fe90b7e04427 100644
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b)
+ }
+ }
+
+- if (b->batch_page) {
+- vunmap(b->batch_page);
+- b->batch_page = NULL;
+- }
+-
+- if (b->page) {
+- __free_page(b->page);
+- b->page = NULL;
+- }
++ /* Clearing the batch_page unconditionally has no adverse effect */
++ free_page((unsigned long)b->batch_page);
++ b->batch_page = NULL;
+ }
+
+ /*
+@@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
+
+ static bool vmballoon_init_batching(struct vmballoon *b)
+ {
+- b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
+- if (!b->page)
+- return false;
++ struct page *page;
+
+- b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
+- if (!b->batch_page) {
+- __free_page(b->page);
++ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++ if (!page)
+ return false;
+- }
+
++ b->batch_page = page_address(page);
+ return true;
+ }
+
+--
+2.17.1
+
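A userspace model of the ownership fix (malloc/free stand in for the page allocator; this is a sketch, not the driver code): teardown now owns only batch_page and may free it unconditionally, because freeing a never-allocated NULL pointer is a no-op and batch_page no longer aliases the in-flight page.

#include <stdlib.h>

struct balloon {
	void *batch_page;  /* batching: comm page, owned by init/pop  */
	void *page;        /* non-batching: in-flight, owned elsewhere */
};

static void pop(struct balloon *b)
{
	free(b->batch_page);   /* safe even if never allocated */
	b->batch_page = NULL;  /* never touches b->page anymore */
}

int main(void)
{
	struct balloon b = { 0 };

	pop(&b);                       /* no allocation: still safe   */
	b.batch_page = malloc(4096);
	pop(&b);                       /* allocated: freed exactly once */
	return 0;
}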
diff --git a/patches.kernel.org/4.4.138-021-kvm-x86-use-correct-privilege-level-for-sgdt-.patch b/patches.kernel.org/4.4.138-021-kvm-x86-use-correct-privilege-level-for-sgdt-.patch
new file mode 100644
index 0000000000..92a4b82cc7
--- /dev/null
+++ b/patches.kernel.org/4.4.138-021-kvm-x86-use-correct-privilege-level-for-sgdt-.patch
@@ -0,0 +1,159 @@
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Wed, 6 Jun 2018 17:38:09 +0200
+Subject: [PATCH] kvm: x86: use correct privilege level for
+ sgdt/sidt/fxsave/fxrstor access
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 3c9fa24ca7c9c47605672916491f79e8ccacb9e6
+
+commit 3c9fa24ca7c9c47605672916491f79e8ccacb9e6 upstream.
+
+The functions that were used in the emulation of fxrstor, fxsave, sgdt and
+sidt were originally meant for task switching, and as such they did not
+check privilege levels. This is very bad when the same functions are used
+in the emulation of unprivileged instructions. This is CVE-2018-10853.
+
+The obvious fix is to add a new argument to ops->read_std and ops->write_std,
+which decides whether the access is a "system" access or should use the
+processor's CPL.
+
+Fixes: 129a72a0d3c8 ("KVM: x86: Introduce segmented_write_std", 2017-01-12)
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/kvm_emulate.h | 6 ++++--
+ arch/x86/kvm/emulate.c | 12 ++++++------
+ arch/x86/kvm/x86.c | 18 ++++++++++++++----
+ 3 files changed, 24 insertions(+), 12 deletions(-)
+
+diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
+index fc3c7e49c8e4..ae357d0afc91 100644
+--- a/arch/x86/include/asm/kvm_emulate.h
++++ b/arch/x86/include/asm/kvm_emulate.h
+@@ -105,11 +105,12 @@ struct x86_emulate_ops {
+ * @addr: [IN ] Linear address from which to read.
+ * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to read from memory.
++ * @system:[IN ] Whether the access is forced to be at CPL0.
+ */
+ int (*read_std)(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr, void *val,
+ unsigned int bytes,
+- struct x86_exception *fault);
++ struct x86_exception *fault, bool system);
+
+ /*
+ * read_phys: Read bytes of standard (non-emulated/special) memory.
+@@ -127,10 +128,11 @@ struct x86_emulate_ops {
+ * @addr: [IN ] Linear address to which to write.
+ * @val: [OUT] Value write to memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to write to memory.
++ * @system:[IN ] Whether the access is forced to be at CPL0.
+ */
+ int (*write_std)(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr, void *val, unsigned int bytes,
+- struct x86_exception *fault);
++ struct x86_exception *fault, bool system);
+ /*
+ * fetch: Read bytes of standard (non-emulated/special) memory.
+ * Used for instruction fetch.
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index c27e09ee8669..f1507626ed36 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -793,14 +793,14 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+ static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
+ void *data, unsigned size)
+ {
+- return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
++ return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
+ }
+
+ static int linear_write_system(struct x86_emulate_ctxt *ctxt,
+ ulong linear, void *data,
+ unsigned int size)
+ {
+- return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
++ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
+ }
+
+ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+@@ -814,7 +814,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+ rc = linearize(ctxt, addr, size, false, &linear);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+- return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
++ return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
+ }
+
+ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+@@ -828,7 +828,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+ rc = linearize(ctxt, addr, size, true, &linear);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+- return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
++ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
+ }
+
+ /*
+@@ -2900,12 +2900,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
+ #ifdef CONFIG_X86_64
+ base |= ((u64)base3) << 32;
+ #endif
+- r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
++ r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
+ if (r != X86EMUL_CONTINUE)
+ return false;
+ if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
+ return false;
+- r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
++ r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
+ if (r != X86EMUL_CONTINUE)
+ return false;
+ if ((perm >> bit_idx) & mask)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b6238ce9806d..53d43d22a84b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4258,10 +4258,15 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
+
+ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
+ gva_t addr, void *val, unsigned int bytes,
+- struct x86_exception *exception)
++ struct x86_exception *exception, bool system)
+ {
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+- return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
++ u32 access = 0;
++
++ if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
++ access |= PFERR_USER_MASK;
++
++ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
+ }
+
+ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
+@@ -4305,12 +4310,17 @@ static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes
+ }
+
+ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
+- unsigned int bytes, struct x86_exception *exception)
++ unsigned int bytes, struct x86_exception *exception,
++ bool system)
+ {
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ u32 access = PFERR_WRITE_MASK;
++
++ if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
++ access |= PFERR_USER_MASK;
+
+ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+- PFERR_WRITE_MASK, exception);
++ access, exception);
+ }
+
+ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
+--
+2.17.1
+
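A standalone model of the new access computation (PFERR_USER_MASK mirrors the x86 page-fault error-code "user access" bit; emulator and vcpu plumbing omitted, so this only illustrates the flag logic):

#include <stdio.h>

#define PFERR_USER_MASK (1u << 2)

/* Models emulator_read_std(): only non-system accesses from CPL 3 are
 * walked with user permissions enforced. */
static unsigned read_access_flags(int cpl, int system)
{
	unsigned access = 0;

	if (!system && cpl == 3)
		access |= PFERR_USER_MASK;
	return access;
}

int main(void)
{
	/* sgdt emulated for a CPL-3 guest: the descriptor-table fetch
	 * stays a system access, but the operand write to guest memory
	 * must honor user-mode page permissions. */
	printf("table fetch: %#x\n", read_access_flags(3, 1));  /* 0   */
	printf("operand:     %#x\n", read_access_flags(3, 0));  /* 0x4 */
	return 0;
}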
diff --git a/patches.kernel.org/4.4.138-022-Input-goodix-add-new-ACPI-id-for-GPD-Win-2-to.patch b/patches.kernel.org/4.4.138-022-Input-goodix-add-new-ACPI-id-for-GPD-Win-2-to.patch
new file mode 100644
index 0000000000..3a70e88f97
--- /dev/null
+++ b/patches.kernel.org/4.4.138-022-Input-goodix-add-new-ACPI-id-for-GPD-Win-2-to.patch
@@ -0,0 +1,37 @@
+From: Ethan Lee <flibitijibibo@gmail.com>
+Date: Thu, 31 May 2018 16:13:17 -0700
+Subject: [PATCH] Input: goodix - add new ACPI id for GPD Win 2 touch screen
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 5ca4d1ae9bad0f59bd6f851c39b19f5366953666
+
+commit 5ca4d1ae9bad0f59bd6f851c39b19f5366953666 upstream.
+
+GPD Win 2 Website: http://www.gpd.hk/gpdwin2.asp
+
+Tested on a unit from the first production run sent to Indiegogo backers
+
+Signed-off-by: Ethan Lee <flibitijibibo@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/input/touchscreen/goodix.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index 4d113c9e4b77..7bf2597ce44c 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -425,6 +425,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id goodix_acpi_match[] = {
+ { "GDIX1001", 0 },
++ { "GDIX1002", 0 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
+--
+2.17.1
+
diff --git a/patches.kernel.org/4.4.138-023-Input-elan_i2c-add-ELAN0612-Lenovo-v330-14IKB.patch b/patches.kernel.org/4.4.138-023-Input-elan_i2c-add-ELAN0612-Lenovo-v330-14IKB.patch
new file mode 100644
index 0000000000..a8e913ec10
--- /dev/null
+++ b/patches.kernel.org/4.4.138-023-Input-elan_i2c-add-ELAN0612-Lenovo-v330-14IKB.patch
@@ -0,0 +1,37 @@
+From: Johannes Wienke <languitar@semipol.de>
+Date: Mon, 4 Jun 2018 13:37:26 -0700
+Subject: [PATCH] Input: elan_i2c - add ELAN0612 (Lenovo v330 14IKB) ACPI ID
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: e6e7e9cd8eed0e18217c899843bffbe8c7dae564
+
+commit e6e7e9cd8eed0e18217c899843bffbe8c7dae564 upstream.
+
+Add ELAN0612 to the list of supported touchpads; this ID is used in Lenovo
+v330 14IKB devices.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199253
+Signed-off-by: Johannes Wienke <languitar@semipol.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/input/mouse/elan_i2c_core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 3851d5715772..aeb8250ab079 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1249,6 +1249,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN060B", 0 },
+ { "ELAN060C", 0 },
+ { "ELAN0611", 0 },
++ { "ELAN0612", 0 },
+ { "ELAN1000", 0 },
+ { }
+ };
+--
+2.17.1
+
diff --git a/patches.kernel.org/4.4.138-024-crypto-vmx-Remove-overly-verbose-printk-from-.patch b/patches.kernel.org/4.4.138-024-crypto-vmx-Remove-overly-verbose-printk-from-.patch
new file mode 100644
index 0000000000..7f60f6479f
--- /dev/null
+++ b/patches.kernel.org/4.4.138-024-crypto-vmx-Remove-overly-verbose-printk-from-.patch
@@ -0,0 +1,90 @@
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 3 May 2018 22:29:29 +1000
+Subject: [PATCH] crypto: vmx - Remove overly verbose printk from AES init
+ routines
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 1411b5218adbcf1d45ddb260db5553c52e8d917c
+
+commit 1411b5218adbcf1d45ddb260db5553c52e8d917c upstream.
+
+In the vmx AES init routines we do a printk(KERN_INFO ...) to report
+the fallback implementation we're using.
+
+However with a slow console this can significantly affect the speed of
+crypto operations. Using 'cryptsetup benchmark' the removal of the
+printk() leads to a ~5x speedup for aes-cbc decryption.
+
+So remove them.
+
+Fixes: 8676590a1593 ("crypto: vmx - Adding AES routines for VMX module")
+Fixes: 8c755ace357c ("crypto: vmx - Adding CBC routines for VMX module")
+Fixes: 4f7f60d312b3 ("crypto: vmx - Adding CTR routines for VMX module")
+Fixes: cc333cd68dfa ("crypto: vmx - Adding GHASH routines for VMX module")
+Cc: stable@vger.kernel.org # v4.1+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/crypto/vmx/aes.c | 2 --
+ drivers/crypto/vmx/aes_cbc.c | 2 --
+ drivers/crypto/vmx/aes_ctr.c | 2 --
+ drivers/crypto/vmx/ghash.c | 2 --
+ 4 files changed, 8 deletions(-)
+
+diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
+index 263af709e536..b907e4b1bbe2 100644
+--- a/drivers/crypto/vmx/aes.c
++++ b/drivers/crypto/vmx/aes.c
+@@ -53,8 +53,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+- crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+ crypto_cipher_set_flags(fallback,
+ crypto_cipher_get_flags((struct
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 3f8bb9a40df1..9506e8693c81 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -55,8 +55,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+- crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+ crypto_blkcipher_set_flags(
+ fallback,
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index d83ab4bac8b1..7d070201b3d3 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -53,8 +53,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+- crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+ crypto_blkcipher_set_flags(
+ fallback,
+diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
+index 9cb3a0b715e2..84b9389bf1ed 100644
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+- crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
+
+ crypto_shash_set_flags(fallback,
+ crypto_shash_get_flags((struct crypto_shash
+--
+2.17.1
+
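Dropping the printk() entirely is the right fix here, since the message fires on every tfm initialization. Had some notice still been wanted, a one-shot or debug-level print would avoid the slow-console penalty; a hedged sketch (not what upstream chose), with report_fallback() being a hypothetical helper:

    #include <linux/printk.h>

    static void report_fallback(const char *driver_name)
    {
        /* printk_once() logs only on the first call, so a slow
         * console is paid for at most once, not per tfm init. */
        printk_once(KERN_INFO "Using '%s' as fallback implementation.\n",
                    driver_name);

        /* pr_debug() compiles to nothing unless DEBUG (or dynamic
         * debug) is enabled, so production kernels pay nothing. */
        pr_debug("fallback driver: %s\n", driver_name);
    }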
diff --git a/patches.kernel.org/4.4.138-025-Linux-4.4.138.patch b/patches.kernel.org/4.4.138-025-Linux-4.4.138.patch
new file mode 100644
index 0000000000..3e1118a4bd
--- /dev/null
+++ b/patches.kernel.org/4.4.138-025-Linux-4.4.138.patch
@@ -0,0 +1,27 @@
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sat, 16 Jun 2018 09:54:27 +0200
+Subject: [PATCH] Linux 4.4.138
+References: bnc#1012382
+Patch-mainline: 4.4.138
+Git-commit: 0bd2bedb3501db249b347e5acbfd3415bd7667a5
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index 44efd1252ab8..1a8c0fc6b997 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 137
++SUBLEVEL = 138
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+--
+2.17.1
+
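The release patch only bumps SUBLEVEL; VERSION, PATCHLEVEL and SUBLEVEL are packed into LINUX_VERSION_CODE, which code compares against the KERNEL_VERSION() macro for conditional compilation. A small userspace sketch of the same arithmetic (the macro body mirrors the 4.4-era include/linux/version.h):

    #include <stdio.h>

    /* Same packing the kernel uses: (a << 16) + (b << 8) + c */
    #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

    int main(void)
    {
        /* VERSION = 4, PATCHLEVEL = 4, SUBLEVEL = 138 from the Makefile */
        printf("4.4.138 -> version code %d\n", KERNEL_VERSION(4, 4, 138));
        /* prints 263306: 4*65536 + 4*256 + 138 */
        return 0;
    }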
diff --git a/patches.suse/revert-x86-fpu-Hard-disable-lazy-FPU-mode.patch b/patches.suse/revert-x86-fpu-Hard-disable-lazy-FPU-mode.patch
new file mode 100644
index 0000000000..bc4f230675
--- /dev/null
+++ b/patches.suse/revert-x86-fpu-Hard-disable-lazy-FPU-mode.patch
@@ -0,0 +1,169 @@
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Sat, 16 Jun 2018 16:34:14 +0200
+Subject: Revert "x86/fpu: Hard-disable lazy FPU mode"
+Patch-mainline: never, SUSE specific
+References: compatibility
+
+This reverts commit 7c3adb3c7ec4285c1958b42846684924d0be3d58, upstream
+commit ca6938a1cd8a1c5e861a99b67f84ac166fc2b9e7. That commit removed the
+ability to switch back to lazy FPU mode, which we want to keep available
+in already released SLEs.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/cpufeature.h | 2
+ arch/x86/include/asm/fpu/internal.h | 2
+ arch/x86/kernel/fpu/init.c | 91 ++++++++++++++++++++++++++++++++++--
+ 3 files changed, 90 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -104,7 +104,7 @@
+ #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
+ #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
+ #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
+-/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
++#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
+ #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+
+ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -58,7 +58,7 @@ extern u64 fpu__get_supported_xfeatures_
+ */
+ static __always_inline __pure bool use_eager_fpu(void)
+ {
+- return true;
++ return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+ }
+
+ static __always_inline __pure bool use_xsaveopt(void)
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -15,7 +15,10 @@
+ */
+ static void fpu__init_cpu_ctx_switch(void)
+ {
+- clts();
++ if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
++ stts();
++ else
++ clts();
+ }
+
+ /*
+@@ -232,16 +235,82 @@ static void __init fpu__init_system_xsta
+ }
+
+ /*
++ * FPU context switching strategies:
++ *
++ * Contrary to popular belief, we don't do lazy FPU saves, due to the
++ * task migration complications it brings on SMP - we only do
++ * lazy FPU restores.
++ *
++ * 'lazy' is the traditional strategy, which is based on setting
++ * CR0::TS to 1 during context-switch (instead of doing a full
++ * restore of the FPU state), which causes the first FPU instruction
++ * after the context switch (whenever it is executed) to fault - at
++ * which point we lazily restore the FPU state into FPU registers.
++ *
++ * Tasks are of course under no obligation to execute FPU instructions,
++ * so it can easily happen that another context-switch occurs without
++ * a single FPU instruction being executed. If we eventually switch
++ * back to the original task (that still owns the FPU) then we have
++ * not only saved the restores along the way, but we also have the
++ * FPU ready to be used for the original task.
++ *
++ * 'lazy' is deprecated because it's almost never a performance win
++ * and it's much more complicated than 'eager'.
++ *
++ * 'eager' switching is by default on all CPUs, there we switch the FPU
++ * state during every context switch, regardless of whether the task
++ * has used FPU instructions in that time slice or not. This is done
++ * because modern FPU context saving instructions are able to optimize
++ * state saving and restoration in hardware: they can detect both
++ * unused and untouched FPU state and optimize accordingly.
++ *
++ * [ Note that even in 'lazy' mode we might optimize context switches
++ * to use 'eager' restores, if we detect that a task is using the FPU
++ * frequently. See the fpu->counter logic in fpu/internal.h for that. ]
++ */
++static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
++
++/*
+ * Find supported xfeatures based on cpu features and command-line input.
+ * This must be called after fpu__init_parse_early_param() is called and
+ * xfeatures_mask is enumerated.
+ */
+ u64 __init fpu__get_supported_xfeatures_mask(void)
+ {
+- return XCNTXT_MASK;
++ /* Support all xfeatures known to us */
++ if (eagerfpu != DISABLE)
++ return XCNTXT_MASK;
++
++ /* Warn that xfeatures will be disabled when eagerfpu mode is off */
++ if (xfeatures_mask & XFEATURE_MASK_EAGER) {
++ pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
++ xfeatures_mask & XFEATURE_MASK_EAGER);
++ }
++
++ /* Return a mask that masks out all features requiring eagerfpu mode */
++ return ~XFEATURE_MASK_EAGER;
+ }
+
+-/* Legacy code to initialize eager fpu mode. */
++/*
++ * Disable features dependent on eagerfpu.
++ */
++static void __init fpu__clear_eager_fpu_features(void)
++{
++ setup_clear_cpu_cap(X86_FEATURE_MPX);
++}
++
++/*
++ * Pick the FPU context switching strategy:
++ *
++ * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
++ * the following is true:
++ *
++ * (1) the cpu has xsaveopt, as it has the optimization and doing eager
++ * FPU switching has a relatively low cost compared to a plain xsave;
++ * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
++ * switching. Should the kernel boot with noxsaveopt, we support MPX
++ * with eager FPU switching at a higher cost.
++ */
+ static void __init fpu__init_system_ctx_switch(void)
+ {
+ static bool on_boot_cpu = 1;
+@@ -251,6 +320,17 @@ static void __init fpu__init_system_ctx_
+
+ WARN_ON_FPU(current->thread.fpu.fpstate_active);
+ current_thread_info()->status = 0;
++
++ if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
++ eagerfpu = ENABLE;
++
++ if (xfeatures_mask & XFEATURE_MASK_EAGER)
++ eagerfpu = ENABLE;
++
++ if (eagerfpu == ENABLE)
++ setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
++
++ printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
+ }
+
+ /*
+@@ -259,6 +339,11 @@ static void __init fpu__init_system_ctx_
+ */
+ static void __init fpu__init_parse_early_param(void)
+ {
++ if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
++ eagerfpu = DISABLE;
++ fpu__clear_eager_fpu_features();
++ }
++
+ if (cmdline_find_option_bool(boot_command_line, "no387"))
+ setup_clear_cpu_cap(X86_FEATURE_FPU);
+
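The revert restores both the context-switch strategy logic commented above and the "eagerfpu=off" switch parsed via cmdline_find_option_bool(). For comparison, an ordinary boot parameter would be wired up with __setup(), roughly as in this hypothetical sketch:

    #include <linux/init.h>
    #include <linux/string.h>

    static bool eager_fpu_enabled = true;

    /* Hypothetical handler for "eagerfpu=on" / "eagerfpu=off". */
    static int __init eagerfpu_setup(char *s)
    {
        if (!strcmp(s, "off"))
            eager_fpu_enabled = false;
        else if (!strcmp(s, "on"))
            eager_fpu_enabled = true;
        return 1;    /* parameter consumed */
    }
    __setup("eagerfpu=", eagerfpu_setup);

The restored code cannot use __setup() because fpu__init_parse_early_param() runs before the __setup() handlers are invoked, which is exactly what 4.4.138-001-x86-fpu-Fix-early-FPU-command-line-parsing.patch earlier in this series addresses.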
diff --git a/series.conf b/series.conf
index 968e435937..7701d9caca 100644
--- a/series.conf
+++ b/series.conf
@@ -3281,6 +3281,31 @@
patches.kernel.org/4.4.137-023-net-phy-broadcom-Fix-bcm_write_exp.patch
patches.kernel.org/4.4.137-024-net-metrics-add-proper-netlink-validation.patch
patches.kernel.org/4.4.137-025-Linux-4.4.137.patch
+ patches.kernel.org/4.4.138-001-x86-fpu-Fix-early-FPU-command-line-parsing.patch
+ patches.kernel.org/4.4.138-002-x86-Remove-unused-function-cpu_has_ht_sibling.patch
+ patches.kernel.org/4.4.138-003-x86-cpufeature-Remove-unused-and-seldomly-use.patch
+ patches.kernel.org/4.4.138-004-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch
+ patches.kernel.org/4.4.138-005-x86-fpu-Disable-AVX-when-eagerfpu-is-off.patch
+ patches.kernel.org/4.4.138-006-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch
+ patches.kernel.org/4.4.138-007-x86-fpu-Fix-no387-regression.patch
+ patches.kernel.org/4.4.138-008-x86-fpu-Revert-x86-fpu-Disable-AVX-when-eager.patch
+ patches.kernel.org/4.4.138-009-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-.patch
+ patches.kernel.org/4.4.138-010-x86-fpu-Hard-disable-lazy-FPU-mode.patch
+ patches.kernel.org/4.4.138-011-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch
+ patches.kernel.org/4.4.138-012-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch
+ patches.kernel.org/4.4.138-013-af_key-Always-verify-length-of-provided-sadb_.patch
+ patches.kernel.org/4.4.138-014-x86-crypto-x86-fpu-Remove-X86_FEATURE_EAGER_F.patch
+ patches.kernel.org/4.4.138-015-gpio-No-NULL-owner.patch
+ patches.kernel.org/4.4.138-016-Clarify-and-fix-MAX_LFS_FILESIZE-macros.patch
+ patches.kernel.org/4.4.138-017-KVM-x86-introduce-linear_-read-write-_system.patch
+ patches.kernel.org/4.4.138-018-KVM-x86-pass-kvm_vcpu-to-kvm_read_guest_virt-.patch
+ patches.kernel.org/4.4.138-019-serial-samsung-fix-maxburst-parameter-for-DMA.patch
+ patches.kernel.org/4.4.138-020-vmw_balloon-fixing-double-free-when-batching-.patch
+ patches.kernel.org/4.4.138-021-kvm-x86-use-correct-privilege-level-for-sgdt-.patch
+ patches.kernel.org/4.4.138-022-Input-goodix-add-new-ACPI-id-for-GPD-Win-2-to.patch
+ patches.kernel.org/4.4.138-023-Input-elan_i2c-add-ELAN0612-Lenovo-v330-14IKB.patch
+ patches.kernel.org/4.4.138-024-crypto-vmx-Remove-overly-verbose-printk-from-.patch
+ patches.kernel.org/4.4.138-025-Linux-4.4.138.patch
########################################################
# Build fixes that apply to the vanilla kernel too.
@@ -3373,6 +3398,7 @@
patches.kabi/kabi-protect-struct-ccw_device_private.patch
patches.kabi/kabi-protect-struct-tcp_sock.patch
patches.kabi/kabi-protect-of-include-in-tpm-chip.patch
+ patches.kabi/kabi-protect-struct-x86_emulate_ops.patch
########################################################
#
@@ -3577,6 +3603,7 @@
########################################################
# Scheduler / Core
######################################################
+ patches.suse/revert-x86-fpu-Hard-disable-lazy-FPU-mode.patch
patches.suse/setuid-dumpable-wrongdir
patches.suse/sched-provide-nortsched-boot-option.patch
@@ -3958,17 +3985,10 @@
# changed this area a lot.
patches.arch/x86_64-hpet-64bit-timer.patch
- patches.suse/0001-x86-fpu-Fix-early-FPU-command-line-parsing.patch
patches.suse/0002-x86-fpu-Disable-XGETBV1-when-no-XSAVE.patch
- patches.suse/0003-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch
- patches.suse/0001-x86-fpu-Fix-no387-regression.patch
- patches.suse/0001-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-machine.patch
- patches.suse/0001-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch
- patches.suse/0002-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch
patches.suse/0003-x86-fpu-Fold-fpu_copy-into-fpu__copy.patch
patches.suse/0004-x86-fpu-Speed-up-lazy-FPU-restores-slightly.patch
- patches.suse/0005-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch
# bsc#940946, bsc#937444: kexec, apic_extnmi
patches.arch/00-panic-x86-fix-re-entrance-problem-due-to-panic-on-nmi.patch