author     Takashi Iwai <tiwai@suse.de>  2018-06-14 07:39:52 +0200
committer  Takashi Iwai <tiwai@suse.de>  2018-06-14 07:39:52 +0200
commit     d150dea711b7c025a40ef50a80566c66ec1e9963 (patch)
tree       367d931b19360c73612e00c3d37a3bc6baa14e01
parent     95087d72fa277dda8131e12c3b9b4823d63fecde (diff)
parent     b394093d49415d205a6c9b3f96f80e2351a83286 (diff)

Merge branch 'SLE12-SP3' into openSUSE-42.3 (tag: rpm-4.4.136-56)
-rw-r--r--  patches.arch/0003-x86-fpu-x86-mm-pkeys-add-pkru-xsave-fields-and-data-structures |  18
-rw-r--r--  patches.arch/0046-x86-pkeys-default-to-a-restrictive-init-pkru                   |  18
-rw-r--r--  patches.arch/kvm-x86-remove-code-for-lazy-fpu-handling                          | 389
-rw-r--r--  patches.arch/x86-cpufeature-add-avx512_4vnniw-and-avx512_4fmaps-features.patch  |   4
-rw-r--r--  patches.arch/x86-cpufeature-enable-new-avx-512-features.patch                   |   6
-rw-r--r--  patches.suse/0001-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-machine.patch    |  79
-rw-r--r--  patches.suse/0001-x86-fpu-Fix-early-FPU-command-line-parsing.patch              | 187
-rw-r--r--  patches.suse/0001-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch            |  71
-rw-r--r--  patches.suse/0001-x86-fpu-Fix-no387-regression.patch                            |  62
-rw-r--r--  patches.suse/0002-x86-fpu-Disable-XGETBV1-when-no-XSAVE.patch                   |  47
-rw-r--r--  patches.suse/0002-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch               |  79
-rw-r--r--  patches.suse/0003-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch                | 146
-rw-r--r--  patches.suse/0003-x86-fpu-Fold-fpu_copy-into-fpu__copy.patch                    |  87
-rw-r--r--  patches.suse/0004-x86-fpu-Speed-up-lazy-FPU-restores-slightly.patch             |  41
-rw-r--r--  patches.suse/0005-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch                 |  86
-rw-r--r--  series.conf                                                                     |  14
16 files changed, 1311 insertions(+), 23 deletions(-)
diff --git a/patches.arch/0003-x86-fpu-x86-mm-pkeys-add-pkru-xsave-fields-and-data-structures b/patches.arch/0003-x86-fpu-x86-mm-pkeys-add-pkru-xsave-fields-and-data-structures
index 49c7814c61..bb606a5441 100644
--- a/patches.arch/0003-x86-fpu-x86-mm-pkeys-add-pkru-xsave-fields-and-data-structures
+++ b/patches.arch/0003-x86-fpu-x86-mm-pkeys-add-pkru-xsave-fields-and-data-structures
@@ -30,10 +30,10 @@ Link: http://lkml.kernel.org/r/20160212210204.56DF8F7B@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Joerg Roedel <jroedel@suse.de>
---
- arch/x86/include/asm/fpu/types.h | 11 +++++++++++
- arch/x86/include/asm/fpu/xstate.h | 3 ++-
- arch/x86/kernel/fpu/xstate.c | 7 ++++++-
- 3 files changed, 19 insertions(+), 2 deletions(-)
+ arch/x86/include/asm/fpu/types.h | 12 ++++++++++++
+ arch/x86/include/asm/fpu/xstate.h | 3 ++-
+ arch/x86/kernel/fpu/xstate.c | 7 ++++++-
+ 3 files changed, 20 insertions(+), 2 deletions(-)
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -93,15 +93,15 @@ Acked-by: Joerg Roedel <jroedel@suse.de>
};
/*
-@@ -57,6 +59,7 @@ void fpu__xstate_clear_all_cpu_caps(void
- setup_clear_cpu_cap(X86_FEATURE_MPX);
+@@ -58,6 +60,7 @@ void fpu__xstate_clear_all_cpu_caps(void
+ setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);
+ setup_clear_cpu_cap(X86_FEATURE_PKU);
}
/*
-@@ -235,7 +238,7 @@ static void __init print_xstate_feature(
+@@ -236,7 +239,7 @@ static void __init print_xstate_feature(
const char *feature_name;
if (cpu_has_xfeatures(xstate_mask, &feature_name))
@@ -110,7 +110,7 @@ Acked-by: Joerg Roedel <jroedel@suse.de>
}
/*
-@@ -251,6 +254,7 @@ static void __init print_xstate_features
+@@ -252,6 +255,7 @@ static void __init print_xstate_features
print_xstate_feature(XFEATURE_MASK_OPMASK);
print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
@@ -118,7 +118,7 @@ Acked-by: Joerg Roedel <jroedel@suse.de>
}
/*
-@@ -467,6 +471,7 @@ static void check_xstate_against_struct(
+@@ -468,6 +472,7 @@ static void check_xstate_against_struct(
XCHECK_SZ(sz, nr, XFEATURE_OPMASK, struct avx_512_opmask_state);
XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM, struct avx_512_hi16_state);
diff --git a/patches.arch/0046-x86-pkeys-default-to-a-restrictive-init-pkru b/patches.arch/0046-x86-pkeys-default-to-a-restrictive-init-pkru
index a0416ca08f..a2242c3408 100644
--- a/patches.arch/0046-x86-pkeys-default-to-a-restrictive-init-pkru
+++ b/patches.arch/0046-x86-pkeys-default-to-a-restrictive-init-pkru
@@ -48,16 +48,16 @@ Link: http://lkml.kernel.org/r/20160729163021.F3C25D4A@viggo.jf.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Joerg Roedel <jroedel@suse.de>
---
- Documentation/kernel-parameters.txt | 5 +++++
- arch/x86/include/asm/pkeys.h | 1 +
- arch/x86/kernel/fpu/core.c | 4 ++++
- arch/x86/mm/pkeys.c | 38 +++++++++++++++++++++++++++++++++++++
- include/linux/pkeys.h | 4 ++++
+ Documentation/kernel-parameters.txt | 5 ++++
+ arch/x86/include/asm/pkeys.h | 1
+ arch/x86/kernel/fpu/core.c | 4 +++
+ arch/x86/mm/pkeys.c | 38 ++++++++++++++++++++++++++++++++++++
+ include/linux/pkeys.h | 4 +++
5 files changed, 52 insertions(+)
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
-@@ -1538,6 +1538,11 @@ bytes respectively. Such letter suffixes
+@@ -1542,6 +1542,11 @@ bytes respectively. Such letter suffixes
initrd= [BOOT] Specify the location of the initial ramdisk
@@ -88,10 +88,10 @@ Acked-by: Joerg Roedel <jroedel@suse.de>
/*
* Represents the initial FPU state. It's mostly (but not completely) zeroes,
-@@ -474,6 +475,9 @@ static inline void copy_init_fpstate_to_
- copy_kernel_to_xregs(&init_fpstate.xsave, -1);
- else
+@@ -478,6 +479,9 @@ static inline void copy_init_fpstate_to_
copy_kernel_to_fxregs(&init_fpstate.fxsave);
+ else
+ copy_kernel_to_fregs(&init_fpstate.fsave);
+
+ if (boot_cpu_has(X86_FEATURE_OSPKE))
+ copy_init_pkru_to_fpregs();
diff --git a/patches.arch/kvm-x86-remove-code-for-lazy-fpu-handling b/patches.arch/kvm-x86-remove-code-for-lazy-fpu-handling
new file mode 100644
index 0000000000..e62055e0f1
--- /dev/null
+++ b/patches.arch/kvm-x86-remove-code-for-lazy-fpu-handling
@@ -0,0 +1,389 @@
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 3 Feb 2017 21:18:52 -0800
+Subject: KVM: x86: remove code for lazy FPU handling
+Git-commit: bd7e5b0899a429445cc6e3037c13f8b5ae3be903
+Patch-mainline: v4.11-rc1
+References: bsc#1095241, CVE-2018-3665
+
+The FPU is always active now when running KVM.
+
+Reviewed-by: David Matlack <dmatlack@google.com>
+Reviewed-by: Bandan Das <bsd@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Acked-by: Joerg Roedel <jroedel@suse.de>
+---
+ arch/x86/include/asm/kvm_host.h | 3 --
+ arch/x86/kvm/cpuid.c | 2 -
+ arch/x86/kvm/svm.c | 43 ++-------------
+ arch/x86/kvm/vmx.c | 112 ++++++----------------------------------
+ arch/x86/kvm/x86.c | 7 +--
+ include/linux/kvm_host.h | 1 -
+ 6 files changed, 19 insertions(+), 149 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -122,10 +122,6 @@ int kvm_update_cpuid(struct kvm_vcpu *vc
+ if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
+ best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
+
+- vcpu->arch.eager_fpu = use_eager_fpu();
+- if (vcpu->arch.eager_fpu)
+- kvm_x86_ops->fpu_activate(vcpu);
+-
+ /*
+ * The existing code assumes virtual address is 48-bit in the canonical
+ * address checks; exit if it is ever changed.
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1192,7 +1192,6 @@ static void init_vmcb(struct vcpu_svm *s
+ struct vmcb_control_area *control = &svm->vmcb->control;
+ struct vmcb_save_area *save = &svm->vmcb->save;
+
+- svm->vcpu.fpu_active = 1;
+ svm->vcpu.arch.hflags = 0;
+
+ set_cr_intercept(svm, INTERCEPT_CR0_READ);
+@@ -1951,15 +1950,12 @@ static void update_cr0_intercept(struct
+ ulong gcr0 = svm->vcpu.arch.cr0;
+ u64 *hcr0 = &svm->vmcb->save.cr0;
+
+- if (!svm->vcpu.fpu_active)
+- *hcr0 |= SVM_CR0_SELECTIVE_MASK;
+- else
+- *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
+- | (gcr0 & SVM_CR0_SELECTIVE_MASK);
++ *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
++ | (gcr0 & SVM_CR0_SELECTIVE_MASK);
+
+ mark_dirty(svm->vmcb, VMCB_CR);
+
+- if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
++ if (gcr0 == *hcr0) {
+ clr_cr_intercept(svm, INTERCEPT_CR0_READ);
+ clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
+ } else {
+@@ -1990,8 +1986,6 @@ static void svm_set_cr0(struct kvm_vcpu
+ if (!npt_enabled)
+ cr0 |= X86_CR0_PG | X86_CR0_WP;
+
+- if (!vcpu->fpu_active)
+- cr0 |= X86_CR0_TS;
+ /*
+ * re-enable caching here because the QEMU bios
+ * does not do it - this results in some delay at
+@@ -2209,22 +2203,6 @@ static int ac_interception(struct vcpu_s
+ return 1;
+ }
+
+-static void svm_fpu_activate(struct kvm_vcpu *vcpu)
+-{
+- struct vcpu_svm *svm = to_svm(vcpu);
+-
+- clr_exception_intercept(svm, NM_VECTOR);
+-
+- svm->vcpu.fpu_active = 1;
+- update_cr0_intercept(svm);
+-}
+-
+-static int nm_interception(struct vcpu_svm *svm)
+-{
+- svm_fpu_activate(&svm->vcpu);
+- return 1;
+-}
+-
+ static bool is_erratum_383(void)
+ {
+ int err, i;
+@@ -2622,9 +2600,6 @@ static int nested_svm_exit_special(struc
+ if (!npt_enabled && svm->apf_reason == 0)
+ return NESTED_EXIT_HOST;
+ break;
+- case SVM_EXIT_EXCP_BASE + NM_VECTOR:
+- nm_interception(svm);
+- break;
+ default:
+ break;
+ }
+@@ -4104,7 +4079,6 @@ static int (*const svm_exit_handlers[])(
+ [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
+ [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
+ [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
+- [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
+ [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
+ [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
+ [SVM_EXIT_INTR] = intr_interception,
+@@ -5187,14 +5161,6 @@ static bool svm_has_wbinvd_exit(void)
+ return true;
+ }
+
+-static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
+-{
+- struct vcpu_svm *svm = to_svm(vcpu);
+-
+- set_exception_intercept(svm, NM_VECTOR);
+- update_cr0_intercept(svm);
+-}
+-
+ #define PRE_EX(exit) { .exit_code = (exit), \
+ .stage = X86_ICPT_PRE_EXCEPT, }
+ #define POST_EX(exit) { .exit_code = (exit), \
+@@ -5451,9 +5417,6 @@ static struct kvm_x86_ops svm_x86_ops =
+ .get_pkru = svm_get_pkru,
+ .set_pkru = svm_set_pkru,
+
+- .fpu_activate = svm_fpu_activate,
+- .fpu_deactivate = svm_fpu_deactivate,
+-
+ .tlb_flush = svm_flush_tlb,
+
+ .run = svm_vcpu_run,
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1679,7 +1679,7 @@ static void update_exception_bitmap(stru
+ u32 eb;
+
+ eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
+- (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);
++ (1u << DB_VECTOR) | (1u << AC_VECTOR);
+ if ((vcpu->guest_debug &
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
+@@ -1688,8 +1688,6 @@ static void update_exception_bitmap(stru
+ eb = ~0;
+ if (enable_ept)
+ eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
+- if (vcpu->fpu_active)
+- eb &= ~(1u << NM_VECTOR);
+
+ /* When we are running a nested L2 guest and L1 specified for it a
+ * certain exception bitmap, we must trap the same exceptions and pass
+@@ -2167,25 +2165,6 @@ static void vmx_vcpu_put(struct kvm_vcpu
+ }
+ }
+
+-static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
+-{
+- ulong cr0;
+-
+- if (vcpu->fpu_active)
+- return;
+- vcpu->fpu_active = 1;
+- cr0 = vmcs_readl(GUEST_CR0);
+- cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
+- cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
+- vmcs_writel(GUEST_CR0, cr0);
+- update_exception_bitmap(vcpu);
+- vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+- if (is_guest_mode(vcpu))
+- vcpu->arch.cr0_guest_owned_bits &=
+- ~get_vmcs12(vcpu)->cr0_guest_host_mask;
+- vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+-}
+-
+ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
+
+ /*
+@@ -2204,33 +2183,6 @@ static inline unsigned long nested_read_
+ (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
+ }
+
+-static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
+-{
+- /* Note that there is no vcpu->fpu_active = 0 here. The caller must
+- * set this *before* calling this function.
+- */
+- vmx_decache_cr0_guest_bits(vcpu);
+- vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
+- update_exception_bitmap(vcpu);
+- vcpu->arch.cr0_guest_owned_bits = 0;
+- vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+- if (is_guest_mode(vcpu)) {
+- /*
+- * L1's specified read shadow might not contain the TS bit,
+- * so now that we turned on shadowing of this bit, we need to
+- * set this bit of the shadow. Like in nested_vmx_run we need
+- * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
+- * up-to-date here because we just decached cr0.TS (and we'll
+- * only update vmcs12->guest_cr0 on nested exit).
+- */
+- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+- vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
+- (vcpu->arch.cr0 & X86_CR0_TS);
+- vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
+- } else
+- vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
+-}
+-
+ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
+ {
+ unsigned long rflags, save_rflags;
+@@ -3829,9 +3781,6 @@ static void vmx_set_cr0(struct kvm_vcpu
+ if (enable_ept)
+ ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
+
+- if (!vcpu->fpu_active)
+- hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
+-
+ vmcs_writel(CR0_READ_SHADOW, cr0);
+ vmcs_writel(GUEST_CR0, hw_cr0);
+ vcpu->arch.cr0 = cr0;
+@@ -4971,7 +4920,9 @@ static int vmx_vcpu_setup(struct vcpu_vm
+ /* 22.2.1, 20.8.1 */
+ vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
+
+- vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
++ vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
++ vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
++
+ set_cr4_guest_host_mask(vmx);
+
+ if (vmx_xsaves_supported())
+@@ -5075,7 +5026,7 @@ static void vmx_vcpu_reset(struct kvm_vc
+ vmx_set_cr0(vcpu, cr0); /* enter rmode */
+ vmx_set_cr4(vcpu, 0);
+ vmx_set_efer(vcpu, 0);
+- vmx_fpu_activate(vcpu);
++
+ update_exception_bitmap(vcpu);
+
+ vpid_sync_context(vmx->vpid);
+@@ -5353,11 +5304,6 @@ static int handle_exception(struct kvm_v
+ if (is_nmi(intr_info))
+ return 1; /* already handled by vmx_vcpu_run() */
+
+- if (is_no_device(intr_info)) {
+- vmx_fpu_activate(vcpu);
+- return 1;
+- }
+-
+ if (is_invalid_opcode(intr_info)) {
+ if (is_guest_mode(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+@@ -5555,22 +5501,6 @@ static int handle_set_cr4(struct kvm_vcp
+ return kvm_set_cr4(vcpu, val);
+ }
+
+-/* called to set cr0 as approriate for clts instruction exit. */
+-static void handle_clts(struct kvm_vcpu *vcpu)
+-{
+- if (is_guest_mode(vcpu)) {
+- /*
+- * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
+- * but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
+- * just pretend it's off (also in arch.cr0 for fpu_activate).
+- */
+- vmcs_writel(CR0_READ_SHADOW,
+- vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
+- vcpu->arch.cr0 &= ~X86_CR0_TS;
+- } else
+- vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+-}
+-
+ static int handle_cr(struct kvm_vcpu *vcpu)
+ {
+ unsigned long exit_qualification, val;
+@@ -5613,10 +5543,10 @@ static int handle_cr(struct kvm_vcpu *vc
+ }
+ break;
+ case 2: /* clts */
+- handle_clts(vcpu);
++ WARN_ONCE(1, "Guest should always own CR0.TS");
++ vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+ trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
+ skip_emulated_instruction(vcpu);
+- vmx_fpu_activate(vcpu);
+ return 1;
+ case 1: /*mov from cr*/
+ switch (cr) {
+@@ -9932,8 +9862,8 @@ static void prepare_vmcs02(struct kvm_vc
+ vmx_set_efer(vcpu, vcpu->arch.efer);
+
+ /*
+- * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
+- * TS bit (for lazy fpu) and bits which we consider mandatory enabled.
++ * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
++ * bits which we consider mandatory enabled.
+ * The CR0_READ_SHADOW is what L2 should have expected to read given
+ * the specifications by L1; It's not enough to take
+ * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we
+@@ -10479,24 +10409,15 @@ static void load_vmcs12_host_state(struc
+ vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
+ /*
+ * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
+- * actually changed, because it depends on the current state of
+- * fpu_active (which may have changed).
+- * Note that vmx_set_cr0 refers to efer set above.
++ * actually changed, because vmx_set_cr0 refers to efer set above.
++ *
++ * CR0_GUEST_HOST_MASK is already set in the original vmcs01
++ * (KVM doesn't change it);
+ */
++ vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vmx_set_cr0(vcpu, vmcs12->host_cr0);
+- /*
+- * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
+- * to apply the same changes to L1's vmcs. We just set cr0 correctly,
+- * but we also need to update cr0_guest_host_mask and exception_bitmap.
+- */
+- update_exception_bitmap(vcpu);
+- vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
+- vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+
+- /*
+- * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
+- * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask();
+- */
++ /* Same as above - no reason to call set_cr4_guest_host_mask(). */
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+ vmx_set_cr4(vcpu, vmcs12->host_cr4);
+
+@@ -11025,9 +10946,6 @@ static struct kvm_x86_ops vmx_x86_ops =
+ .get_pkru = vmx_get_pkru,
+ .set_pkru = vmx_set_pkru,
+
+- .fpu_activate = vmx_fpu_activate,
+- .fpu_deactivate = vmx_fpu_deactivate,
+-
+ .tlb_flush = vmx_flush_tlb,
+
+ .run = vmx_vcpu_run,
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6538,10 +6538,6 @@ static int vcpu_enter_guest(struct kvm_v
+ r = 0;
+ goto out;
+ }
+- if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
+- vcpu->fpu_active = 0;
+- kvm_x86_ops->fpu_deactivate(vcpu);
+- }
+ if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
+ /* Page is swapped out. Do synthetic halt */
+ vcpu->arch.apf.halted = true;
+@@ -6642,8 +6638,7 @@ static int vcpu_enter_guest(struct kvm_v
+ preempt_disable();
+
+ kvm_x86_ops->prepare_guest_switch(vcpu);
+- if (vcpu->fpu_active)
+- kvm_load_guest_fpu(vcpu);
++ kvm_load_guest_fpu(vcpu);
+ vcpu->mode = IN_GUEST_MODE;
+
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+@@ -7393,16 +7388,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *
+ copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
+ __kernel_fpu_end();
+ ++vcpu->stat.fpu_reload;
+- /*
+- * If using eager FPU mode, or if the guest is a frequent user
+- * of the FPU, just leave the FPU active for next time.
+- * Every 255 times fpu_counter rolls over to 0; a guest that uses
+- * the FPU in bursts will revert to loading it on demand.
+- */
+- if (!vcpu->arch.eager_fpu) {
+- if (++vcpu->fpu_counter < 5)
+- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+- }
+ trace_kvm_fpu(0);
+ }
+
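
The burst heuristic that this patch deletes from kvm_put_guest_fpu() is easier to read in isolation. Below is a minimal user-space model of the old policy, not kernel code: struct vcpu, its bool fields, and main() are invented scaffolding; only the counter threshold and the deactivation request mirror the removed lines.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the few fields of struct kvm_vcpu the heuristic touched. */
struct vcpu {
	bool eager_fpu;        /* cached use_eager_fpu() result */
	unsigned fpu_counter;  /* a u8 in the real code, rolls over at 255 */
	bool deactivate_req;   /* models KVM_REQ_DEACTIVATE_FPU */
};

/* Old policy, as removed above: request FPU deactivation unless the
 * guest used the FPU on five or more consecutive exits, so bursty
 * guests fall back to on-demand (#NM-trapped) loading. */
static void put_guest_fpu_lazy(struct vcpu *v)
{
	if (!v->eager_fpu && ++v->fpu_counter < 5)
		v->deactivate_req = true;
}

/* New policy: the guest FPU simply stays loaded; there is no decision. */
static void put_guest_fpu_eager(struct vcpu *v)
{
	(void)v;
}

int main(void)
{
	struct vcpu v = { .eager_fpu = false };

	for (int exit = 1; exit <= 6; exit++) {
		v.deactivate_req = false;
		put_guest_fpu_lazy(&v);
		printf("exit %d: deactivate=%d\n", exit, v.deactivate_req);
	}
	put_guest_fpu_eager(&v);
	return 0;
}
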
diff --git a/patches.arch/x86-cpufeature-add-avx512_4vnniw-and-avx512_4fmaps-features.patch b/patches.arch/x86-cpufeature-add-avx512_4vnniw-and-avx512_4fmaps-features.patch
index 4375831fb8..fc979c31a0 100644
--- a/patches.arch/x86-cpufeature-add-avx512_4vnniw-and-avx512_4fmaps-features.patch
+++ b/patches.arch/x86-cpufeature-add-avx512_4vnniw-and-avx512_4fmaps-features.patch
@@ -64,10 +64,10 @@ Acked-by: Borislav Petkov <bp@suse.de>
{ X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
-@@ -55,6 +55,8 @@ void fpu__xstate_clear_all_cpu_caps(void
- setup_clear_cpu_cap(X86_FEATURE_AVX512BW);
+@@ -56,6 +56,8 @@ void fpu__xstate_clear_all_cpu_caps(void
setup_clear_cpu_cap(X86_FEATURE_AVX512VL);
setup_clear_cpu_cap(X86_FEATURE_MPX);
+ setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);
}
diff --git a/patches.arch/x86-cpufeature-enable-new-avx-512-features.patch b/patches.arch/x86-cpufeature-enable-new-avx-512-features.patch
index 5e8deacf04..2c6f64dee8 100644
--- a/patches.arch/x86-cpufeature-enable-new-avx-512-features.patch
+++ b/patches.arch/x86-cpufeature-enable-new-avx-512-features.patch
@@ -45,7 +45,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
-@@ -235,6 +235,7 @@
+@@ -242,6 +242,7 @@
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
@@ -53,7 +53,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
-@@ -245,6 +246,8 @@
+@@ -252,6 +253,8 @@
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
@@ -72,5 +72,5 @@ Acked-by: Borislav Petkov <bp@suse.de>
+ setup_clear_cpu_cap(X86_FEATURE_AVX512BW);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512VL);
setup_clear_cpu_cap(X86_FEATURE_MPX);
+ setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
}
-
diff --git a/patches.suse/0001-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-machine.patch b/patches.suse/0001-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-machine.patch
new file mode 100644
index 0000000000..c7664c15d1
--- /dev/null
+++ b/patches.suse/0001-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-machine.patch
@@ -0,0 +1,79 @@
+From: Borislav Petkov <bp@alien8.de>
+Date: Fri, 11 Mar 2016 12:32:06 +0100
+Subject: x86/fpu: Fix eager-FPU handling on legacy FPU machines
+Git-commit: 6e6867093de35141f0a76b66ac13f9f2e2c8e77a
+Patch-mainline: v4.5
+References: bnc#1087086 CVE-2018-3665
+
+i486 derived cores like Intel Quark support only the very old,
+legacy x87 FPU (FSAVE/FRSTOR, CPUID bit FXSR is not set), and
+our FPU code wasn't handling the saving and restoring there
+properly in the 'eagerfpu' case.
+
+So after we made eagerfpu the default for all CPU types:
+
+ 58122bf1d856 x86/fpu: Default eagerfpu=on on all CPUs
+
+these old FPU designs broke. First, Andy Shevchenko reported a splat:
+
+ WARNING: CPU: 0 PID: 823 at arch/x86/include/asm/fpu/internal.h:163 fpu__clear+0x8c/0x160
+
+which was us trying to execute FXRSTOR on those machines even though
+they don't support it.
+
+After taking care of that, Bryan O'Donoghue reported that a simple FPU
+test still failed because we weren't initializing the FPU state properly
+on those machines.
+
+Take care of all that.
+
+Reported-and-tested-by: Bryan O'Donoghue <pure.logic@nexus-software.ie>
+Reported-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Yu-cheng <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/20160311113206.GD4312@pd.tnic
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/kernel/fpu/core.c | 4 +++-
+ arch/x86/kernel/fpu/init.c | 2 +-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -409,8 +409,10 @@ static inline void copy_init_fpstate_to_
+ {
+ if (use_xsave())
+ copy_kernel_to_xregs(&init_fpstate.xsave, -1);
+- else
++ else if (static_cpu_has(X86_FEATURE_FXSR))
+ copy_kernel_to_fxregs(&init_fpstate.fxsave);
++ else
++ copy_kernel_to_fregs(&init_fpstate.fsave);
+ }
+
+ /*
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -135,7 +135,7 @@ static void __init fpu__init_system_gene
+ * Set up the legacy init FPU context. (xstate init might overwrite this
+ * with a more modern format, if the CPU supports it.)
+ */
+- fpstate_init_fxstate(&init_fpstate.fxsave);
++ fpstate_init(&init_fpstate);
+
+ fpu__init_system_mxcsr();
+ }
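
The functional change in this patch is a two-way branch becoming a three-way dispatch in copy_init_fpstate_to_fpregs(). A hedged sketch of that shape follows; plain booleans stand in for use_xsave() and static_cpu_has(X86_FEATURE_FXSR), and the enum and main() are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

enum restore_insn { XRSTOR, FXRSTOR, FRSTOR };

/* Models the fixed dispatch: fall back to the i486-era FRSTOR path only
 * when neither XSAVE nor FXSR exists, instead of executing FXRSTOR on
 * hardware (e.g. Intel Quark) that does not implement it. */
static enum restore_insn pick_init_restore(bool has_xsave, bool has_fxsr)
{
	if (has_xsave)
		return XRSTOR;   /* copy_kernel_to_xregs(&init_fpstate.xsave, -1) */
	if (has_fxsr)
		return FXRSTOR;  /* copy_kernel_to_fxregs(&init_fpstate.fxsave) */
	return FRSTOR;           /* copy_kernel_to_fregs(&init_fpstate.fsave) */
}

int main(void)
{
	/* A legacy x87-only core: the case the pre-fix code got wrong. */
	printf("%d\n", pick_init_restore(false, false)); /* 2 == FRSTOR */
	return 0;
}
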
diff --git a/patches.suse/0001-x86-fpu-Fix-early-FPU-command-line-parsing.patch b/patches.suse/0001-x86-fpu-Fix-early-FPU-command-line-parsing.patch
new file mode 100644
index 0000000000..3b6768f1ef
--- /dev/null
+++ b/patches.suse/0001-x86-fpu-Fix-early-FPU-command-line-parsing.patch
@@ -0,0 +1,187 @@
+From: yu-cheng yu <yu-cheng.yu@intel.com>
+Date: Wed, 6 Jan 2016 14:24:51 -0800
+Subject: x86/fpu: Fix early FPU command-line parsing
+Git-commit: 4f81cbafcce2c603db7865e9d0e461f7947d77d4
+Patch-mainline: v4.5-rc1
+References: bnc#1087086 CVE-2018-3665
+
+The function fpu__init_system() is executed before
+parse_early_param(). This causes wrong FPU configuration. This
+patch fixes this issue by parsing boot_command_line in the
+beginning of fpu__init_system().
+
+With all four patches in this series, each parameter disables
+features as the following:
+
+eagerfpu=off: eagerfpu, avx, avx2, avx512, mpx
+no387: fpu
+nofxsr: fxsr, fxsropt, xmm
+noxsave: xsave, xsaveopt, xsaves, xsavec, avx, avx2, avx512,
+mpx, xgetbv1
+noxsaveopt: xsaveopt
+noxsaves: xsaves
+
+Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/1452119094-7252-2-git-send-email-yu-cheng.yu@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/kernel/fpu/init.c | 109 +++++++++++++++------------------------------
+ 1 file changed, 38 insertions(+), 71 deletions(-)
+
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -3,8 +3,11 @@
+ */
+ #include <asm/fpu/internal.h>
+ #include <asm/tlbflush.h>
++#include <asm/setup.h>
++#include <asm/cmdline.h>
+
+ #include <linux/sched.h>
++#include <linux/init.h>
+
+ /*
+ * Initialize the TS bit in CR0 according to the style of context-switches
+@@ -271,18 +274,6 @@ static void __init fpu__init_system_xsta
+ */
+ static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
+
+-static int __init eager_fpu_setup(char *s)
+-{
+- if (!strcmp(s, "on"))
+- eagerfpu = ENABLE;
+- else if (!strcmp(s, "off"))
+- eagerfpu = DISABLE;
+- else if (!strcmp(s, "auto"))
+- eagerfpu = AUTO;
+- return 1;
+-}
+-__setup("eagerfpu=", eager_fpu_setup);
+-
+ /*
+ * Pick the FPU context switching strategy:
+ */
+@@ -317,11 +308,46 @@ static void __init fpu__init_system_ctx_
+ }
+
+ /*
++ * We parse fpu parameters early because fpu__init_system() is executed
++ * before parse_early_param().
++ */
++static void __init fpu__init_parse_early_param(void)
++{
++ /*
++ * No need to check "eagerfpu=auto" again, since it is the
++ * initial default.
++ */
++ if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off"))
++ eagerfpu = DISABLE;
++ else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on"))
++ eagerfpu = ENABLE;
++
++ if (cmdline_find_option_bool(boot_command_line, "no387"))
++ setup_clear_cpu_cap(X86_FEATURE_FPU);
++
++ if (cmdline_find_option_bool(boot_command_line, "nofxsr")) {
++ setup_clear_cpu_cap(X86_FEATURE_FXSR);
++ setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
++ setup_clear_cpu_cap(X86_FEATURE_XMM);
++ }
++
++ if (cmdline_find_option_bool(boot_command_line, "noxsave"))
++ fpu__xstate_clear_all_cpu_caps();
++
++ if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
++ setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
++
++ if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
++ setup_clear_cpu_cap(X86_FEATURE_XSAVES);
++}
++
++/*
+ * Called on the boot CPU once per system bootup, to set up the initial
+ * FPU state that is later cloned into all processes:
+ */
+ void __init fpu__init_system(struct cpuinfo_x86 *c)
+ {
++ fpu__init_parse_early_param();
+ fpu__init_system_early_generic(c);
+
+ /*
+@@ -345,62 +371,3 @@ void __init fpu__init_system(struct cpui
+
+ fpu__init_system_ctx_switch();
+ }
+-
+-/*
+- * Boot parameter to turn off FPU support and fall back to math-emu:
+- */
+-static int __init no_387(char *s)
+-{
+- setup_clear_cpu_cap(X86_FEATURE_FPU);
+- return 1;
+-}
+-__setup("no387", no_387);
+-
+-/*
+- * Disable all xstate CPU features:
+- */
+-static int __init x86_noxsave_setup(char *s)
+-{
+- if (strlen(s))
+- return 0;
+-
+- fpu__xstate_clear_all_cpu_caps();
+-
+- return 1;
+-}
+-__setup("noxsave", x86_noxsave_setup);
+-
+-/*
+- * Disable the XSAVEOPT instruction specifically:
+- */
+-static int __init x86_noxsaveopt_setup(char *s)
+-{
+- setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+-
+- return 1;
+-}
+-__setup("noxsaveopt", x86_noxsaveopt_setup);
+-
+-/*
+- * Disable the XSAVES instruction:
+- */
+-static int __init x86_noxsaves_setup(char *s)
+-{
+- setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+-
+- return 1;
+-}
+-__setup("noxsaves", x86_noxsaves_setup);
+-
+-/*
+- * Disable FX save/restore and SSE support:
+- */
+-static int __init x86_nofxsr_setup(char *s)
+-{
+- setup_clear_cpu_cap(X86_FEATURE_FXSR);
+- setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
+- setup_clear_cpu_cap(X86_FEATURE_XMM);
+-
+- return 1;
+-}
+-__setup("nofxsr", x86_nofxsr_setup);
diff --git a/patches.suse/0001-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch b/patches.suse/0001-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch
new file mode 100644
index 0000000000..1b32da6b61
--- /dev/null
+++ b/patches.suse/0001-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch
@@ -0,0 +1,71 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 24 Jan 2016 14:38:06 -0800
+Subject: x86/fpu: Fix math emulation in eager fpu mode
+Git-commit: 4ecd16ec7059390b430af34bd8bc3ca2b5dcef9a
+Patch-mainline: v4.6-rc1
+References: bnc#1087086 CVE-2018-3665
+
+Systems without an FPU are generally old and therefore use lazy FPU
+switching. Unsurprisingly, math emulation in eager FPU mode is a
+bit buggy. Fix it.
+
+There were two bugs involving kernel code trying to use the FPU
+registers in eager mode even if they didn't exist and one BUG_ON()
+that was incorrect.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/b4b8d112436bd6fab866e1b4011131507e8d7fbe.1453675014.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/fpu/internal.h | 3 ++-
+ arch/x86/kernel/fpu/core.c | 2 +-
+ arch/x86/kernel/traps.c | 1 -
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -596,7 +596,8 @@ switch_fpu_prepare(struct fpu *old_fpu,
+ * If the task has used the math, pre-load the FPU on xsave processors
+ * or if the past 5 consecutive context-switches used math.
+ */
+- fpu.preload = new_fpu->fpstate_active &&
++ fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
++ new_fpu->fpstate_active &&
+ (use_eager_fpu() || new_fpu->counter > 5);
+
+ if (old_fpu->fpregs_active) {
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -425,7 +425,7 @@ void fpu__clear(struct fpu *fpu)
+ {
+ WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+
+- if (!use_eager_fpu()) {
++ if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
+ /* FPU state will be reallocated lazily at the first use. */
+ fpu__drop(fpu);
+ } else {
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -751,7 +751,6 @@ dotraplinkage void
+ do_device_not_available(struct pt_regs *regs, long error_code)
+ {
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+- BUG_ON(use_eager_fpu());
+
+ #ifdef CONFIG_MATH_EMULATION
+ if (read_cr0() & X86_CR0_EM) {
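
With the BUG_ON() gone, the #NM path this patch touches reduces to a two-way decision. A schematic user-space model follows; the stubs stand in for math_emulate() and fpu__restore(), and nothing here is the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

static void math_emulate(void) { puts("emulate x87 in software"); }
static void fpu_restore(void)  { puts("load this task's FPU state"); }

/* Models do_device_not_available(): a #NM trap means either "no FPU at
 * all, CR0.EM is set, emulate" or "FPU state not loaded yet". The
 * removed BUG_ON(use_eager_fpu()) wrongly assumed the trap could never
 * fire in eager mode, which FPU-less machines disprove. */
static void device_not_available(bool cr0_em)
{
	if (cr0_em)
		math_emulate();
	else
		fpu_restore();
}

int main(void)
{
	device_not_available(true);   /* FPU-less machine with math emulation */
	device_not_available(false);  /* state reload */
	return 0;
}
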
diff --git a/patches.suse/0001-x86-fpu-Fix-no387-regression.patch b/patches.suse/0001-x86-fpu-Fix-no387-regression.patch
new file mode 100644
index 0000000000..f34da85b97
--- /dev/null
+++ b/patches.suse/0001-x86-fpu-Fix-no387-regression.patch
@@ -0,0 +1,62 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Thu, 21 Jan 2016 15:24:31 -0800
+Subject: x86/fpu: Fix 'no387' regression
+Git-commit: f363938c70a04e6bc99023a5e0c44ef7879b903f
+Patch-mainline: v4.5
+References: bnc#1087086 CVE-2018-3665
+
+After fixing FPU option parsing, we now parse the 'no387' boot option
+too early: no387 clears X86_FEATURE_FPU before it's even probed, so
+the boot CPU promptly re-enables it.
+
+I suspect it gets even more confused on SMP.
+
+Fix the probing code to leave X86_FEATURE_FPU off if it's been
+disabled by setup_clear_cpu_cap().
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Fixes: 4f81cbafcce2 ("x86/fpu: Fix early FPU command-line parsing")
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/kernel/fpu/init.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -78,13 +78,15 @@ static void fpu__init_system_early_gener
+ cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
+ write_cr0(cr0);
+
+- asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+- : "+m" (fsw), "+m" (fcw));
++ if (!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
++ asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
++ : "+m" (fsw), "+m" (fcw));
+
+- if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+- set_cpu_cap(c, X86_FEATURE_FPU);
+- else
+- clear_cpu_cap(c, X86_FEATURE_FPU);
++ if (fsw == 0 && (fcw & 0x103f) == 0x003f)
++ set_cpu_cap(c, X86_FEATURE_FPU);
++ else
++ clear_cpu_cap(c, X86_FEATURE_FPU);
++ }
+
+ #ifndef CONFIG_MATH_EMULATION
+ if (!cpu_has_fpu) {
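
The shape of the fix: the probe must consult the set of capabilities already force-cleared by the command line before writing its own verdict. A minimal model is below, using a single bitmask instead of the kernel's per-CPU capability arrays; FEATURE_FPU and probe_fpu() are invented names.

#include <stdbool.h>
#include <stdio.h>

#define FEATURE_FPU 0

static unsigned long cpu_caps_cleared; /* bits forced off by the user */
static unsigned long cpu_caps;         /* what probing detected */

static void setup_clear_cpu_cap(int bit)
{
	cpu_caps_cleared |= 1UL << bit;
}

/* Models the fixed probe: if 'no387' already cleared the FPU bit, skip
 * the FNINIT/FNSTSW/FNSTCW hardware probe entirely, so the boot CPU
 * cannot re-enable the feature behind the user's back. */
static void probe_fpu(bool hw_has_fpu)
{
	if (cpu_caps_cleared & (1UL << FEATURE_FPU))
		return;
	if (hw_has_fpu)
		cpu_caps |= 1UL << FEATURE_FPU;
	else
		cpu_caps &= ~(1UL << FEATURE_FPU);
}

int main(void)
{
	setup_clear_cpu_cap(FEATURE_FPU); /* "no387" on the command line */
	probe_fpu(true);                  /* hardware does have an FPU... */
	printf("FPU cap: %lu\n", cpu_caps & 1UL); /* ...but it stays 0 */
	return 0;
}
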
diff --git a/patches.suse/0002-x86-fpu-Disable-XGETBV1-when-no-XSAVE.patch b/patches.suse/0002-x86-fpu-Disable-XGETBV1-when-no-XSAVE.patch
new file mode 100644
index 0000000000..86484fb137
--- /dev/null
+++ b/patches.suse/0002-x86-fpu-Disable-XGETBV1-when-no-XSAVE.patch
@@ -0,0 +1,47 @@
+From: yu-cheng yu <yu-cheng.yu@intel.com>
+Date: Wed, 6 Jan 2016 14:24:52 -0800
+Subject: x86/fpu: Disable XGETBV1 when no XSAVE
+Git-commit: eb7c5f872e697b0aebd846cf3a3328d71e9decb2
+Patch-mainline: v4.5-rc1
+References: bnc#1087086 CVE-2018-3665
+
+When "noxsave" is given as a command-line input, the kernel
+should disable XGETBV1. This issue currently does not cause any
+actual problems. XGETBV1 is only useful if we have something
+using the 'init optimization' (i.e. xsaveopt, xsaves). We
+already clear both of those in fpu__xstate_clear_all_cpu_caps().
+But this is good for completeness.
+
+Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Reviewed-by: Dave Hansen <dave.hansen@intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/1452119094-7252-3-git-send-email-yu-cheng.yu@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/kernel/fpu/xstate.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -52,6 +52,7 @@ void fpu__xstate_clear_all_cpu_caps(void
+ setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+ setup_clear_cpu_cap(X86_FEATURE_MPX);
++ setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
+ }
+
+ /*
diff --git a/patches.suse/0002-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch b/patches.suse/0002-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch
new file mode 100644
index 0000000000..ed942b3288
--- /dev/null
+++ b/patches.suse/0002-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch
@@ -0,0 +1,79 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 24 Jan 2016 14:38:07 -0800
+Subject: x86/fpu: Fix FNSAVE usage in eagerfpu mode
+Git-commit: 5ed73f40735c68d8a656b46d09b1885d3b8740ae
+Patch-mainline: v4.6-rc1
+References: bnc#1087086 CVE-2018-3665
+
+In eager fpu mode, having deactivated FPU without immediately
+reloading some other context is illegal. Therefore, to recover from
+FNSAVE, we can't just deactivate the state -- we need to reload it
+if we're not actively context switching.
+
+We had this wrong in fpu__save() and fpu__copy(). Fix both.
+__kernel_fpu_begin() was fine -- add a comment.
+
+This fixes a warning triggerable with nofxsr eagerfpu=on.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/60662444e13c76f06e23c15c5dcdba31b4ac3d67.1453675014.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/kernel/fpu/core.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -114,6 +114,10 @@ void __kernel_fpu_begin(void)
+ kernel_fpu_disable();
+
+ if (fpu->fpregs_active) {
++ /*
++ * Ignore return value -- we don't care if reg state
++ * is clobbered.
++ */
+ copy_fpregs_to_fpstate(fpu);
+ } else {
+ this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+@@ -189,8 +193,12 @@ void fpu__save(struct fpu *fpu)
+
+ preempt_disable();
+ if (fpu->fpregs_active) {
+- if (!copy_fpregs_to_fpstate(fpu))
+- fpregs_deactivate(fpu);
++ if (!copy_fpregs_to_fpstate(fpu)) {
++ if (use_eager_fpu())
++ copy_kernel_to_fpregs(&fpu->state);
++ else
++ fpregs_deactivate(fpu);
++ }
+ }
+ preempt_enable();
+ }
+@@ -259,7 +267,11 @@ static void fpu_copy(struct fpu *dst_fpu
+ preempt_disable();
+ if (!copy_fpregs_to_fpstate(dst_fpu)) {
+ memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
+- fpregs_deactivate(src_fpu);
++
++ if (use_eager_fpu())
++ copy_kernel_to_fpregs(&src_fpu->state);
++ else
++ fpregs_deactivate(src_fpu);
+ }
+ preempt_enable();
+ }
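
The invariant behind this fix: in eager mode the FPU registers must always hold some live context, but FNSAVE (unlike FXSAVE/XSAVE) destroys the register state as it saves. A compact model of the repaired decision follows; the bools replace copy_fpregs_to_fpstate()'s return value and use_eager_fpu(), and this is not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* copy_fpregs_to_fpstate() returns nonzero when register state survived
 * the save (FXSAVE/XSAVE) and zero when the save was destructive
 * (FNSAVE reinitializes the x87 state as a side effect). */
static void after_save(bool regs_survived, bool eager)
{
	if (regs_survived)
		return;
	if (eager)
		puts("reload saved state");           /* copy_kernel_to_fpregs() */
	else
		puts("deactivate; trap on next use"); /* fpregs_deactivate() */
}

int main(void)
{
	after_save(false, true);  /* FNSAVE + eagerfpu: must reload */
	after_save(false, false); /* FNSAVE + lazy: deactivating is legal */
	return 0;
}
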
diff --git a/patches.suse/0003-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch b/patches.suse/0003-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch
new file mode 100644
index 0000000000..bc311a805f
--- /dev/null
+++ b/patches.suse/0003-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch
@@ -0,0 +1,146 @@
+From: yu-cheng yu <yu-cheng.yu@intel.com>
+Date: Wed, 6 Jan 2016 14:24:53 -0800
+Subject: x86/fpu: Disable MPX when eagerfpu is off
+Git-commit: a5fe93a549c54838063d2952dd9643b0b18aa67f
+Patch-mainline: v4.5-rc1
+References: bnc#1087086 CVE-2018-3665
+
+This issue is a fallout from the command-line parsing move.
+
+When "eagerfpu=off" is given as a command-line input, the kernel
+should disable MPX support. The decision for turning off MPX was
+made in fpu__init_system_ctx_switch(), which is after the
+selection of the XSAVE format. This patch fixes it by getting
+that decision done earlier in fpu__init_system_xstate().
+
+Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Ravi V. Shankar <ravi.v.shankar@intel.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/1452119094-7252-4-git-send-email-yu-cheng.yu@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/include/asm/fpu/internal.h | 1
+ arch/x86/kernel/fpu/init.c | 56 ++++++++++++++++++++++++++++--------
+ arch/x86/kernel/fpu/xstate.c | 3 -
+ 3 files changed, 46 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -42,6 +42,7 @@ extern void fpu__init_cpu_xstate(void);
+ extern void fpu__init_system(struct cpuinfo_x86 *c);
+ extern void fpu__init_check_bugs(void);
+ extern void fpu__resume_cpu(void);
++extern u64 fpu__get_supported_xfeatures_mask(void);
+
+ /*
+ * Debugging facility:
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -275,7 +275,45 @@ static void __init fpu__init_system_xsta
+ static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
+
+ /*
++ * Find supported xfeatures based on cpu features and command-line input.
++ * This must be called after fpu__init_parse_early_param() is called and
++ * xfeatures_mask is enumerated.
++ */
++u64 __init fpu__get_supported_xfeatures_mask(void)
++{
++ /* Support all xfeatures known to us */
++ if (eagerfpu != DISABLE)
++ return XCNTXT_MASK;
++
++ /* Warning of xfeatures being disabled for no eagerfpu mode */
++ if (xfeatures_mask & XFEATURE_MASK_EAGER) {
++ pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
++ xfeatures_mask & XFEATURE_MASK_EAGER);
++ }
++
++ /* Return a mask that masks out all features requiring eagerfpu mode */
++ return ~XFEATURE_MASK_EAGER;
++}
++
++/*
++ * Disable features dependent on eagerfpu.
++ */
++static void __init fpu__clear_eager_fpu_features(void)
++{
++ setup_clear_cpu_cap(X86_FEATURE_MPX);
++}
++
++/*
+ * Pick the FPU context switching strategy:
++ *
++ * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
++ * the following is true:
++ *
++ * (1) the cpu has xsaveopt, as it has the optimization and doing eager
++ * FPU switching has a relatively low cost compared to a plain xsave;
++ * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
++ * switching. Should the kernel boot with noxsaveopt, we support MPX
++ * with eager FPU switching at a higher cost.
+ */
+ static void __init fpu__init_system_ctx_switch(void)
+ {
+@@ -287,19 +325,11 @@ static void __init fpu__init_system_ctx_
+ WARN_ON_FPU(current->thread.fpu.fpstate_active);
+ current_thread_info()->status = 0;
+
+- /* Auto enable eagerfpu for xsaveopt */
+ if (cpu_has_xsaveopt && eagerfpu != DISABLE)
+ eagerfpu = ENABLE;
+
+- if (xfeatures_mask & XFEATURE_MASK_EAGER) {
+- if (eagerfpu == DISABLE) {
+- pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
+- xfeatures_mask & XFEATURE_MASK_EAGER);
+- xfeatures_mask &= ~XFEATURE_MASK_EAGER;
+- } else {
+- eagerfpu = ENABLE;
+- }
+- }
++ if (xfeatures_mask & XFEATURE_MASK_EAGER)
++ eagerfpu = ENABLE;
+
+ if (eagerfpu == ENABLE)
+ setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+@@ -317,10 +347,12 @@ static void __init fpu__init_parse_early
+ * No need to check "eagerfpu=auto" again, since it is the
+ * initial default.
+ */
+- if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off"))
++ if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
+ eagerfpu = DISABLE;
+- else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on"))
++ fpu__clear_eager_fpu_features();
++ } else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on")) {
+ eagerfpu = ENABLE;
++ }
+
+ if (cmdline_find_option_bool(boot_command_line, "no387"))
+ setup_clear_cpu_cap(X86_FEATURE_FPU);
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -633,8 +633,7 @@ void __init fpu__init_system_xstate(void
+ BUG();
+ }
+
+- /* Support only the state known to the OS: */
+- xfeatures_mask = xfeatures_mask & XCNTXT_MASK;
++ xfeatures_mask &= fpu__get_supported_xfeatures_mask();
+
+ /* Enable xstate instructions to be able to continue with initialization: */
+ fpu__init_cpu_xstate();
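
The new fpu__get_supported_xfeatures_mask() is a pure bitmask computation, which is what lets it run before the XSAVE buffer layout is fixed. A sketch of the same computation is below; the bit positions match the kernel's BNDREGS/BNDCSR xfeatures, but the merged helper and main() are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define XFEATURE_MASK_BNDREGS (1ULL << 3)
#define XFEATURE_MASK_BNDCSR  (1ULL << 4)
/* MPX state can only be managed with eager switching. */
#define XFEATURE_MASK_EAGER   (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)

/* With eagerfpu=off, strip every xfeature that requires eager mode
 * before the XSAVE format is chosen, rather than after. */
static uint64_t supported_xfeatures(uint64_t known, int eagerfpu_off)
{
	if (!eagerfpu_off)
		return known;
	return known & ~XFEATURE_MASK_EAGER;
}

int main(void)
{
	uint64_t known = 0x1f; /* FP, SSE, YMM, BNDREGS, BNDCSR */

	printf("eager on:  %#llx\n", (unsigned long long)supported_xfeatures(known, 0));
	printf("eager off: %#llx\n", (unsigned long long)supported_xfeatures(known, 1));
	return 0;
}
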
diff --git a/patches.suse/0003-x86-fpu-Fold-fpu_copy-into-fpu__copy.patch b/patches.suse/0003-x86-fpu-Fold-fpu_copy-into-fpu__copy.patch
new file mode 100644
index 0000000000..189b37ec2c
--- /dev/null
+++ b/patches.suse/0003-x86-fpu-Fold-fpu_copy-into-fpu__copy.patch
@@ -0,0 +1,87 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 24 Jan 2016 14:38:08 -0800
+Subject: x86/fpu: Fold fpu_copy() into fpu__copy()
+Git-commit: a20d7297045f7fdcd676c15243192eb0e95a4306
+Patch-mainline: v4.6-rc1
+References: bnc#1087086 CVE-2018-3665
+
+Splitting it into two functions needlessly obfuscated the code.
+While we're at it, improve the comment slightly.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/3eb5a63a9c5c84077b2677a7dfe684eef96fe59e.1453675014.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/kernel/fpu/core.c | 32 +++++++++++---------------------
+ 1 file changed, 11 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -231,14 +231,15 @@ void fpstate_init(union fpregs_state *st
+ }
+ EXPORT_SYMBOL_GPL(fpstate_init);
+
+-/*
+- * Copy the current task's FPU state to a new task's FPU context.
+- *
+- * In both the 'eager' and the 'lazy' case we save hardware registers
+- * directly to the destination buffer.
+- */
+-static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
++int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+ {
++ dst_fpu->counter = 0;
++ dst_fpu->fpregs_active = 0;
++ dst_fpu->last_cpu = -1;
++
++ if (!src_fpu->fpstate_active || !cpu_has_fpu)
++ return 0;
++
+ WARN_ON_FPU(src_fpu != &current->thread.fpu);
+
+ /*
+@@ -251,10 +252,9 @@ static void fpu_copy(struct fpu *dst_fpu
+ /*
+ * Save current FPU registers directly into the child
+ * FPU context, without any memory-to-memory copying.
+- *
+- * If the FPU context got destroyed in the process (FNSAVE
+- * done on old CPUs) then copy it back into the source
+- * context and mark the current task for lazy restore.
++ * In lazy mode, if the FPU context isn't loaded into
++ * fpregs, CR0.TS will be set and do_device_not_available
++ * will load the FPU context.
+ *
+ * We have to do all this with preemption disabled,
+ * mostly because of the FNSAVE case, because in that
+@@ -274,16 +274,6 @@ static void fpu_copy(struct fpu *dst_fpu
+ fpregs_deactivate(src_fpu);
+ }
+ preempt_enable();
+-}
+-
+-int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+-{
+- dst_fpu->counter = 0;
+- dst_fpu->fpregs_active = 0;
+- dst_fpu->last_cpu = -1;
+-
+- if (src_fpu->fpstate_active && cpu_has_fpu)
+- fpu_copy(dst_fpu, src_fpu);
+
+ return 0;
+ }
diff --git a/patches.suse/0004-x86-fpu-Speed-up-lazy-FPU-restores-slightly.patch b/patches.suse/0004-x86-fpu-Speed-up-lazy-FPU-restores-slightly.patch
new file mode 100644
index 0000000000..919e55f4c4
--- /dev/null
+++ b/patches.suse/0004-x86-fpu-Speed-up-lazy-FPU-restores-slightly.patch
@@ -0,0 +1,41 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 24 Jan 2016 14:38:09 -0800
+Subject: x86/fpu: Speed up lazy FPU restores slightly
+Git-commit: c6ab109f7e0eae3bae3bb10f8ddb0df67735c150
+Patch-mainline: v4.6-rc1
+References: bnc#1087086 CVE-2018-3665
+
+If we have an FPU, there's no need to check CR0 for FPU emulation.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/980004297e233c27066d54e71382c44cdd36ef7c.1453675014.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/kernel/traps.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -753,7 +753,7 @@ do_device_not_available(struct pt_regs *
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+
+ #ifdef CONFIG_MATH_EMULATION
+- if (read_cr0() & X86_CR0_EM) {
++ if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
+ struct math_emu_info info = { };
+
+ conditional_sti(regs);
diff --git a/patches.suse/0005-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch b/patches.suse/0005-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch
new file mode 100644
index 0000000000..06af0e7c9a
--- /dev/null
+++ b/patches.suse/0005-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch
@@ -0,0 +1,86 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sun, 24 Jan 2016 14:38:10 -0800
+Subject: x86/fpu: Default eagerfpu=on on all CPUs
+Git-commit: 58122bf1d856a4ea9581d62a07c557d997d46a19
+Patch-mainline: v4.6-rc1
+References: bnc#1087086 CVE-2018-3665
+
+We have eager and lazy FPU modes, introduced in:
+
+ 304bceda6a18 ("x86, fpu: use non-lazy fpu restore for processors supporting xsave")
+
+The result is rather messy. There are two code paths in almost all
+of the FPU code, and only one of them (the eager case) is tested
+frequently, since most kernel developers have new enough hardware
+that we use eagerfpu.
+
+It seems that, on any remotely recent hardware, eagerfpu is a win:
+glibc uses SSE2, so laziness is probably overoptimistic, and, in any
+case, manipulating TS is far slower that saving and restoring the
+full state. (Stores to CR0.TS are serializing and are poorly
+optimized.)
+
+To try to shake out any latent issues on old hardware, this changes
+the default to eager on all CPUs. If no performance or functionality
+problems show up, a subsequent patch could remove lazy mode entirely.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: yu-cheng yu <yu-cheng.yu@intel.com>
+Link: http://lkml.kernel.org/r/ac290de61bf08d9cfc2664a4f5080257ffc1075a.1453675014.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/x86/kernel/fpu/init.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -263,7 +263,10 @@ static void __init fpu__init_system_xsta
+ * not only saved the restores along the way, but we also have the
+ * FPU ready to be used for the original task.
+ *
+- * 'eager' switching is used on modern CPUs, there we switch the FPU
++ * 'lazy' is deprecated because it's almost never a performance win
++ * and it's much more complicated than 'eager'.
++ *
++ * 'eager' switching is by default on all CPUs, there we switch the FPU
+ * state during every context switch, regardless of whether the task
+ * has used FPU instructions in that time slice or not. This is done
+ * because modern FPU context saving instructions are able to optimize
+@@ -274,7 +277,7 @@ static void __init fpu__init_system_xsta
+ * to use 'eager' restores, if we detect that a task is using the FPU
+ * frequently. See the fpu->counter logic in fpu/internal.h for that. ]
+ */
+-static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
++static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
+
+ /*
+ * Find supported xfeatures based on cpu features and command-line input.
+@@ -345,15 +348,9 @@ static void __init fpu__init_system_ctx_
+ */
+ static void __init fpu__init_parse_early_param(void)
+ {
+- /*
+- * No need to check "eagerfpu=auto" again, since it is the
+- * initial default.
+- */
+ if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
+ eagerfpu = DISABLE;
+ fpu__clear_eager_fpu_features();
+- } else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on")) {
+- eagerfpu = ENABLE;
+ }
+
+ if (cmdline_find_option_bool(boot_command_line, "no387"))
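
For contrast, here are the two context-switch strategies this commit message weighs, in deliberately simplified form; the stubs only summarize what each mode does at switch time and are not the scheduler path.

#include <stdbool.h>
#include <stdio.h>

/* Lazy: set CR0.TS at switch time and restore from the #NM trap on
 * first FPU use; cheap if the next task never touches the FPU, but
 * CR0.TS writes are serializing and glibc's SSE2 use makes "never
 * touches the FPU" rare. Eager: restore up front with XSAVE-family
 * instructions, which optimize untouched state away. */
static void switch_fpu_lazy(bool preload)
{
	if (preload)
		puts("preload FPU state (frequent user)");
	else
		puts("set CR0.TS; restore later from #NM");
}

static void switch_fpu_eager(void)
{
	puts("restore FPU state now (XSAVE/XRSTOR)");
}

int main(void)
{
	switch_fpu_lazy(false);
	switch_fpu_eager();
	return 0;
}
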
diff --git a/series.conf b/series.conf
index 9ba2deeb6c..bb2ade65a8 100644
--- a/series.conf
+++ b/series.conf
@@ -3932,6 +3932,18 @@
# changed this area a lot.
patches.arch/x86_64-hpet-64bit-timer.patch
+ patches.suse/0001-x86-fpu-Fix-early-FPU-command-line-parsing.patch
+ patches.suse/0002-x86-fpu-Disable-XGETBV1-when-no-XSAVE.patch
+ patches.suse/0003-x86-fpu-Disable-MPX-when-eagerfpu-is-off.patch
+
+ patches.suse/0001-x86-fpu-Fix-no387-regression.patch
+ patches.suse/0001-x86-fpu-Fix-eager-FPU-handling-on-legacy-FPU-machine.patch
+ patches.suse/0001-x86-fpu-Fix-math-emulation-in-eager-fpu-mode.patch
+ patches.suse/0002-x86-fpu-Fix-FNSAVE-usage-in-eagerfpu-mode.patch
+ patches.suse/0003-x86-fpu-Fold-fpu_copy-into-fpu__copy.patch
+ patches.suse/0004-x86-fpu-Speed-up-lazy-FPU-restores-slightly.patch
+ patches.suse/0005-x86-fpu-Default-eagerfpu-on-on-all-CPUs.patch
+
# bsc#940946, bsc#937444: kexec, apic_extnmi
patches.arch/00-panic-x86-fix-re-entrance-problem-due-to-panic-on-nmi.patch
patches.arch/01-panic-x86-allow-cpus-to-save-registers-even-if-looping-in-nmi-context.patch
@@ -20549,6 +20561,8 @@
# bsc#1073311, CVE-2017-17741
patches.arch/kvm-fix-stack-out-of-bounds-read-in-write_mmio
+ patches.arch/kvm-x86-remove-code-for-lazy-fpu-handling
+
########################################################
# IOMMU patches
########################################################