Home Home > GIT Browse > SLE15-SP1-AZURE
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKernel Build Daemon <kbuild@suse.de>2018-05-22 07:17:22 +0200
committerKernel Build Daemon <kbuild@suse.de>2018-05-22 07:17:22 +0200
commit66b2edaa09cb505a66dee5f427c110816fca589c (patch)
treeaad1376562a6f39ffc87a1d4dafeae8d0c256f24
parent19dcc0f0c991f18ce798a69a1587e92c78829592 (diff)
parentf1ac4a9a231c257dce6d22c8814b73dc8f171040 (diff)
Merge branch 'SLE15' into openSUSE-15.0rpm-4.12.14-lp150.12.4--15.0-updatesrpm-4.12.14-lp150.12.4
-rw-r--r--patches.arch/KVM--SVM--Move-spec-control-call-after-restore-of-GS.patch59
-rw-r--r--patches.arch/powerpc-64s-Add-support-for-a-store-forwarding-barri.patch578
-rw-r--r--patches.kabi/bpf-prevent-memory-disambiguation-attack.patch24
-rw-r--r--patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch65
-rw-r--r--patches.suse/02-x86-bugs-concentrate-bug-detection-into-a-separate-function.patch67
-rw-r--r--patches.suse/03-x86-bugs-concentrate-bug-reporting-into-a-separate-function.patch84
-rw-r--r--patches.suse/04-x86-bugs-read-spec_ctrl-msr-during-boot-and-re-use-reserved-bits.patch133
-rw-r--r--patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch123
-rw-r--r--patches.suse/06-x86-bugs-expose-sys-spec_store_bypass.patch130
-rw-r--r--patches.suse/07-x86-cpufeatures-add-x86_feature_rds.patch29
-rw-r--r--patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch258
-rw-r--r--patches.suse/09-x86-bugs-intel-set-proper-cpu-features-and-setup-rds.patch167
-rw-r--r--patches.suse/10-x86-bugs-whitelist-allowed-spec_ctrl-msr-values.patch63
-rw-r--r--patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch184
-rw-r--r--patches.suse/12-x86-kvm-vmx-expose-spec_ctrl-bit2-to-the-guest.patch61
-rw-r--r--patches.suse/13-x86-speculation-create-spec-ctrl-h-to-avoid-include-hell.patch121
-rw-r--r--patches.suse/14-prctl-add-speculation-control-prctls.patch224
-rw-r--r--patches.suse/15-x86-process-allow-runtime-control-of-speculative-store-bypass.patch210
-rw-r--r--patches.suse/16-x86-speculation-add-prctl-for-speculative-store-bypass-mitigation.patch209
-rw-r--r--patches.suse/17-nospec-allow-getting-setting-on-non-current-task.patch151
-rw-r--r--patches.suse/18-proc-provide-details-on-speculation-flaw-mitigations.patch54
-rw-r--r--patches.suse/19-seccomp-enable-speculation-flaw-mitigations.patch57
-rw-r--r--patches.suse/20-x86-bugs-make-boot-modes-_ro_after_init.patch36
-rw-r--r--patches.suse/21-prctl-add-force-disable-speculation.patch204
-rw-r--r--patches.suse/22-seccomp-use-pr_spec_force_disable.patch26
-rw-r--r--patches.suse/23-seccomp-add-filter-flag-to-opt-out-of-ssb-mitigation.patch129
-rw-r--r--patches.suse/24-seccomp-move-speculation-migitation-control-to-arch-code.patch109
-rw-r--r--patches.suse/25-x86-speculation-make-seccomp-the-default-mode-for-speculative-store-bypass.patch154
-rw-r--r--patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch377
-rw-r--r--patches.suse/27-proc-use-underscores-for-ssbd-in-status.patch27
-rw-r--r--patches.suse/bpf-prevent-memory-disambiguation-attack.patch134
-rw-r--r--series.conf33
32 files changed, 4280 insertions, 0 deletions
diff --git a/patches.arch/KVM--SVM--Move-spec-control-call-after-restore-of-GS.patch b/patches.arch/KVM--SVM--Move-spec-control-call-after-restore-of-GS.patch
new file mode 100644
index 0000000000..621857759e
--- /dev/null
+++ b/patches.arch/KVM--SVM--Move-spec-control-call-after-restore-of-GS.patch
@@ -0,0 +1,59 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Subject: KVM: SVM: Move spec control call after restore of GS
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+svm_vcpu_run() invokes x86_spec_ctrl_restore_host() after VMEXIT, but
+before the host GS is restored. x86_spec_ctrl_restore_host() uses 'current'
+to determine the host SSBD state of the thread. 'current' is GS based, but
+host GS is not yet restored and the access causes a triple fault.
+
+Move the call after the host GS restore.
+
+Fixes: 5cf687548705 ("x86/bugs, KVM: Support the combination of guest and host IBRS")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Joerg Roedel <jroedel@suse.de>
+---
+ arch/x86/kvm/svm.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5449,6 +5449,18 @@ static void svm_vcpu_run(struct kvm_vcpu
+ #endif
+ );
+
++ /* Eliminate branch target predictions from guest mode */
++ vmexit_fill_RSB();
++
++#ifdef CONFIG_X86_64
++ wrmsrl(MSR_GS_BASE, svm->host.gs_base);
++#else
++ loadsegment(fs, svm->host.fs);
++#ifndef CONFIG_X86_32_LAZY_GS
++ loadsegment(gs, svm->host.gs);
++#endif
++#endif
++
+ /*
+ * We do not use IBRS in the kernel. If this vCPU has used the
+ * SPEC_CTRL MSR it may have left it on; save the value and
+@@ -5469,18 +5481,6 @@ static void svm_vcpu_run(struct kvm_vcpu
+
+ x86_spec_ctrl_restore_host(svm->spec_ctrl);
+
+- /* Eliminate branch target predictions from guest mode */
+- vmexit_fill_RSB();
+-
+-#ifdef CONFIG_X86_64
+- wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+-#else
+- loadsegment(fs, svm->host.fs);
+-#ifndef CONFIG_X86_32_LAZY_GS
+- loadsegment(gs, svm->host.gs);
+-#endif
+-#endif
+-
+ reload_tss(vcpu);
+
+ local_irq_disable();
diff --git a/patches.arch/powerpc-64s-Add-support-for-a-store-forwarding-barri.patch b/patches.arch/powerpc-64s-Add-support-for-a-store-forwarding-barri.patch
new file mode 100644
index 0000000000..6f326119a9
--- /dev/null
+++ b/patches.arch/powerpc-64s-Add-support-for-a-store-forwarding-barri.patch
@@ -0,0 +1,578 @@
+From 525885b1ba9c7e4cc38ab6d72a041ef8787a4698 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Tue, 24 Apr 2018 16:55:14 +1000
+Subject: [PATCH] powerpc/64s: Add support for a store forwarding barrier at
+ kernel entry/exit
+
+References: CVE-2018-3639, bsc#1087082
+Patch-mainline: no, under development
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michal Suchanek <msuchanek@suse.de>
+[mauricio: backport to SLES 15 linux-4.12.14-92.1]
+Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/include/asm/exception-64s.h | 29 ++++++++
+ arch/powerpc/include/asm/feature-fixups.h | 19 +++++
+ arch/powerpc/include/asm/setup.h | 12 ++-
+ arch/powerpc/kernel/exceptions-64s.S | 19 ++++-
+ arch/powerpc/kernel/setup_64.c | 119 +++++++++++++++++++++++++++++-
+ arch/powerpc/kernel/vmlinux.lds.S | 14 ++++
+ arch/powerpc/lib/feature-fixups.c | 114 ++++++++++++++++++++++++++++
+ arch/powerpc/platforms/powernv/setup.c | 1 +
+ arch/powerpc/platforms/pseries/setup.c | 1 +
+ 9 files changed, 320 insertions(+), 8 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index 74419070d605..d838b2ceebe1 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -54,6 +54,27 @@
+
+ #define EX_SIZE 13 /* size in u64 units */
+
++#define STF_ENTRY_BARRIER_SLOT \
++ STF_ENTRY_BARRIER_FIXUP_SECTION; \
++ mflr r10; \
++ bl stf_barrier_fallback; \
++ mtlr r10
++
++#define STF_EXIT_BARRIER_SLOT \
++ STF_EXIT_BARRIER_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop; \
++ nop; \
++ nop; \
++ nop
++
++/*
++ * r10 must be free to use, r13 must be paca
++ */
++#define INTERRUPT_TO_KERNEL \
++ STF_ENTRY_BARRIER_SLOT
++
+ /*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+@@ -70,16 +91,19 @@
+ rfid
+
+ #define RFI_TO_USER \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+ #define RFI_TO_USER_OR_KERNEL \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+ #define RFI_TO_GUEST \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+@@ -88,21 +112,25 @@
+ hrfid
+
+ #define HRFI_TO_USER \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_USER_OR_KERNEL \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_GUEST \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_UNKNOWN \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+@@ -234,6 +262,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+ #define __EXCEPTION_PROLOG_1(area, extra, vec) \
+ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
+ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
++ INTERRUPT_TO_KERNEL; \
+ SAVE_CTR(r10, area); \
+ mfcr r9; \
+ extra(vec); \
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index dcac380656e9..861972a7031e 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -187,6 +187,22 @@ label##3: \
+ FTR_ENTRY_OFFSET label##1b-label##3b; \
+ .popsection;
+
++#define STF_ENTRY_BARRIER_FIXUP_SECTION \
++953: \
++ .pushsection __stf_entry_barrier_fixup,"a"; \
++ .align 2; \
++954: \
++ FTR_ENTRY_OFFSET 953b-954b; \
++ .popsection;
++
++#define STF_EXIT_BARRIER_FIXUP_SECTION \
++955: \
++ .pushsection __stf_exit_barrier_fixup,"a"; \
++ .align 2; \
++956: \
++ FTR_ENTRY_OFFSET 955b-956b; \
++ .popsection;
++
+ #define RFI_FLUSH_FIXUP_SECTION \
+ 951: \
+ .pushsection __rfi_flush_fixup,"a"; \
+@@ -207,6 +223,9 @@ label##3: \
+ #ifndef __ASSEMBLY__
+ #include <linux/types.h>
+
++extern long stf_barrier_fallback;
++extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
++extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+ extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index addb0672b1f3..26f0cbecf1a2 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -38,9 +38,17 @@ static inline void pseries_big_endian_exceptions(void) {}
+ static inline void pseries_little_endian_exceptions(void) {}
+ #endif /* CONFIG_PPC_PSERIES */
+
+-void rfi_flush_enable(bool enable);
+-
+ /* These are bit flags */
++enum stf_barrier_type {
++ STF_BARRIER_NONE = 0x1,
++ STF_BARRIER_FALLBACK = 0x2,
++ STF_BARRIER_EIEIO = 0x4,
++ STF_BARRIER_SYNC_ORI = 0x8,
++};
++
++void setup_stf_barrier(void);
++void do_stf_barrier_fixups(enum stf_barrier_type types);
++
+ enum l1d_flush_type {
+ L1D_FLUSH_NONE = 0x1,
+ L1D_FLUSH_FALLBACK = 0x2,
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 40007ebf0fb1..02c78571198c 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -837,7 +837,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+
+
+-EXC_REAL_MASKABLE(decrementer, 0x900, 0x80)
++EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80)
+ EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900)
+ TRAMP_KVM(PACA_EXGEN, 0x900)
+ EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+@@ -913,6 +913,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
+ mtctr r13; \
+ GET_PACA(r13); \
+ std r10,PACA_EXGEN+EX_R10(r13); \
++ INTERRUPT_TO_KERNEL; \
+ KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
+ HMT_MEDIUM; \
+ mfctr r9;
+@@ -921,7 +922,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
+ #define SYSCALL_KVMTEST \
+ HMT_MEDIUM; \
+ mr r9,r13; \
+- GET_PACA(r13);
++ GET_PACA(r13); \
++ INTERRUPT_TO_KERNEL;
+ #endif
+
+ #define LOAD_SYSCALL_HANDLER(reg) \
+@@ -1423,6 +1425,19 @@ masked_##_H##interrupt: \
+ ##_H##RFI_TO_KERNEL; \
+ b .
+
++TRAMP_REAL_BEGIN(stf_barrier_fallback)
++ std r9,PACA_EXRFI+EX_R9(r13)
++ std r10,PACA_EXRFI+EX_R10(r13)
++ sync
++ ld r9,PACA_EXRFI+EX_R9(r13)
++ ld r10,PACA_EXRFI+EX_R10(r13)
++ ori 31,31,0
++ .rept 14
++ b 1f
++1:
++ .endr
++ blr
++
+ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 25e11b6f429a..ed19cd862f68 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -69,6 +69,7 @@
+ #include <asm/livepatch.h>
+ #include <asm/opal.h>
+ #include <asm/cputhreads.h>
++#include <asm/security_features.h>
+
+ #ifdef DEBUG
+ #define DBG(fmt...) udbg_printf(fmt)
+@@ -791,11 +792,48 @@ early_initcall(disable_hardlockup_detector);
+ #endif
+
+ #ifdef CONFIG_PPC_BOOK3S_64
+-static enum l1d_flush_type enabled_flush_types;
++static enum stf_barrier_type stf_enabled_flush_types;
++static bool no_stf_barrier;
++bool stf_barrier;
++
++static enum l1d_flush_type rfi_enabled_flush_types;
+ static void *l1d_flush_fallback_area;
+ static bool no_rfi_flush;
+ bool rfi_flush;
+
++static int __init handle_no_stf_barrier(char *p)
++{
++ pr_info("stf-barrier: disabled on command line.");
++ no_stf_barrier = true;
++ return 0;
++}
++
++early_param("no_stf_barrier", handle_no_stf_barrier);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_ssbd(char *p)
++{
++ if (!p || strncmp(p, "off", 3) == 0) {
++ handle_no_stf_barrier(NULL);
++ return 0;
++ } else if (strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 )
++ /* Until firmware tells us, we have the barrier with auto */
++ return 0;
++ else
++ return 1;
++
++ return 0;
++}
++early_param("spec_store_bypass_disable", handle_ssbd);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_no_ssbd(char *p)
++{
++ handle_no_stf_barrier(NULL);
++ return 0;
++}
++early_param("nospec_store_bypass_disable", handle_no_ssbd);
++
+ static int __init handle_no_rfi_flush(char *p)
+ {
+ pr_info("rfi-flush: disabled on command line.");
+@@ -824,10 +862,21 @@ static void do_nothing(void *unused)
+ */
+ }
+
+-void rfi_flush_enable(bool enable)
++static void stf_barrier_enable(bool enable)
++{
++ if (enable) {
++ do_stf_barrier_fixups(stf_enabled_flush_types);
++ on_each_cpu(do_nothing, NULL, 1);
++ } else
++ do_stf_barrier_fixups(STF_BARRIER_NONE);
++
++ stf_barrier = enable;
++}
++
++static void rfi_flush_enable(bool enable)
+ {
+ if (enable) {
+- do_rfi_flush_fixups(enabled_flush_types);
++ do_rfi_flush_fixups(rfi_enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else
+ do_rfi_flush_fixups(L1D_FLUSH_NONE);
+@@ -835,6 +884,41 @@ void rfi_flush_enable(bool enable)
+ rfi_flush = enable;
+ }
+
++void setup_stf_barrier(void)
++{
++ enum stf_barrier_type type;
++ bool enable, hv;
++
++ hv = cpu_has_feature(CPU_FTR_HVMODE);
++
++ /* Default to fallback in case fw-features are not available */
++ if (cpu_has_feature(CPU_FTR_ARCH_300))
++ type = STF_BARRIER_EIEIO;
++ else if (cpu_has_feature(CPU_FTR_ARCH_207S))
++ type = STF_BARRIER_SYNC_ORI;
++ else if (cpu_has_feature(CPU_FTR_ARCH_206))
++ type = STF_BARRIER_FALLBACK;
++ else
++ type = STF_BARRIER_NONE;
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
++
++ if (type == STF_BARRIER_FALLBACK) {
++ pr_info("stf-barrier: fallback barrier available\n");
++ } else if (type == STF_BARRIER_SYNC_ORI) {
++ pr_info("stf-barrier: hwsync barrier available\n");
++ } else if (type == STF_BARRIER_EIEIO) {
++ pr_info("stf-barrier: eieio barrier available\n");
++ }
++
++ stf_enabled_flush_types = type;
++
++ if (!no_stf_barrier)
++ stf_barrier_enable(enable);
++}
++
+ static void init_fallback_flush(void)
+ {
+ u64 l1d_size, limit;
+@@ -874,13 +958,39 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
+ if (types & L1D_FLUSH_MTTRIG)
+ pr_info("rfi-flush: mttrig type flush available\n");
+
+- enabled_flush_types = types;
++ rfi_enabled_flush_types = types;
+
+ if (!no_rfi_flush)
+ rfi_flush_enable(enable);
+ }
+
+ #ifdef CONFIG_DEBUG_FS
++static int stf_barrier_set(void *data, u64 val)
++{
++ bool enable;
++
++ if (val == 1)
++ enable = true;
++ else if (val == 0)
++ enable = false;
++ else
++ return -EINVAL;
++
++ /* Only do anything if we're changing state */
++ if (enable != stf_barrier)
++ stf_barrier_enable(enable);
++
++ return 0;
++}
++
++static int stf_barrier_get(void *data, u64 *val)
++{
++ *val = stf_barrier ? 1 : 0;
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
++
+ static int rfi_flush_set(void *data, u64 val)
+ {
+ bool enable;
+@@ -910,6 +1020,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+ static __init int rfi_flush_debugfs_init(void)
+ {
+ debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
++ debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
+ return 0;
+ }
+ device_initcall(rfi_flush_debugfs_init);
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index fdd7e1bc3811..7219d83917e0 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -136,6 +136,20 @@ SECTIONS
+
+ #ifdef CONFIG_PPC64
+ . = ALIGN(8);
++ __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
++ __start___stf_entry_barrier_fixup = .;
++ *(__stf_entry_barrier_fixup)
++ __stop___stf_entry_barrier_fixup = .;
++ }
++
++ . = ALIGN(8);
++ __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
++ __start___stf_exit_barrier_fixup = .;
++ *(__stf_exit_barrier_fixup)
++ __stop___stf_exit_barrier_fixup = .;
++ }
++
++ . = ALIGN(8);
+ __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
+ __start___rfi_flush_fixup = .;
+ *(__rfi_flush_fixup)
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 465fb6cbd981..c0f53a599f36 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -117,6 +117,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ }
+
+ #ifdef CONFIG_PPC_BOOK3S_64
++void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
++{
++ unsigned int instrs[3], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___stf_entry_barrier_fixup),
++ end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types & STF_BARRIER_FALLBACK) {
++ instrs[i++] = 0x7d4802a6; /* mflr r10 */
++ instrs[i++] = 0x60000000; /* branch patched below */
++ instrs[i++] = 0x7d4803a6; /* mtlr r10 */
++ } else if (types & STF_BARRIER_EIEIO) {
++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++ } else if (types & STF_BARRIER_SYNC_ORI) {
++ instrs[i++] = 0x7c0004ac; /* hwsync */
++ instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++
++ if (types & STF_BARRIER_FALLBACK)
++ patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
++ BRANCH_SET_LINK);
++ else
++ patch_instruction(dest + 1, instrs[1]);
++
++ patch_instruction(dest + 2, instrs[2]);
++ }
++
++ printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
++ (types == STF_BARRIER_NONE) ? "no" :
++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
++ (types == STF_BARRIER_EIEIO) ? "eieio" :
++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
++ : "unknown");
++}
++
++void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
++{
++ unsigned int instrs[6], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___stf_exit_barrier_fixup),
++ end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++ instrs[3] = 0x60000000; /* nop */
++ instrs[4] = 0x60000000; /* nop */
++ instrs[5] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
++ instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
++ instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
++ } else {
++ instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
++ instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
++ }
++ instrs[i++] = 0x7c0004ac; /* hwsync */
++ instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
++ instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
++ } else {
++ instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
++ }
++ } else if (types & STF_BARRIER_EIEIO) {
++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++ patch_instruction(dest + 1, instrs[1]);
++ patch_instruction(dest + 2, instrs[2]);
++ patch_instruction(dest + 3, instrs[3]);
++ patch_instruction(dest + 4, instrs[4]);
++ patch_instruction(dest + 5, instrs[5]);
++ }
++ printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
++ (types == STF_BARRIER_NONE) ? "no" :
++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
++ (types == STF_BARRIER_EIEIO) ? "eieio" :
++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
++ : "unknown");
++}
++
++
++void do_stf_barrier_fixups(enum stf_barrier_type types)
++{
++ do_stf_entry_barrier_fixups(types);
++ do_stf_exit_barrier_fixups(types);
++}
++
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ {
+ unsigned int instrs[3], *dest;
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index dbced412fd7f..0f4a487282d0 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -131,6 +131,7 @@ static void __init pnv_setup_arch(void)
+ {
+ set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+
++ setup_stf_barrier();
+ pnv_setup_rfi_flush();
+
+ /* Initialize SMP */
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 15b63bb5ec52..16d26cde2beb 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -711,6 +711,7 @@ static void __init pSeries_setup_arch(void)
+ fwnmi_init();
+
+ pseries_setup_rfi_flush();
++ setup_stf_barrier();
+
+ /* By default, only probe PCI (can be overridden by rtas_pci) */
+ pci_add_flags(PCI_PROBE_ONLY);
+--
+2.13.6
+
diff --git a/patches.kabi/bpf-prevent-memory-disambiguation-attack.patch b/patches.kabi/bpf-prevent-memory-disambiguation-attack.patch
new file mode 100644
index 0000000000..5a692eec02
--- /dev/null
+++ b/patches.kabi/bpf-prevent-memory-disambiguation-attack.patch
@@ -0,0 +1,24 @@
+From: Jiri Kosina <jkosina@suse.cz>
+Subject: [PATCH] kabi: ignore struct bpf_insn_aux_data change
+Patch-mainline: No, SUSE-specific
+References: bsc#1087082
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ include/linux/bpf_verifier.h | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -115,8 +115,10 @@ struct bpf_insn_aux_data {
+ struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
+ };
+ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
+- int sanitize_stack_off; /* stack slot to be cleared */
+ bool seen; /* this insn was processed by the verifier */
++#ifndef __GENKSYMS__
++ int sanitize_stack_off; /* stack slot to be cleared */
++#endif
+ };
+
+ #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch b/patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch
new file mode 100644
index 0000000000..cdcdfbc77b
--- /dev/null
+++ b/patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch
@@ -0,0 +1,65 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Tue, 1 May 2018 15:55:51 +0200
+Subject: x86/nospec: Simplify alternative_msr_write()
+Git-commit: 1aa7a5735a41418d8e01fa7c9565eb2657e2ea3f
+Patch-mainline: v4.18 or v4.17-rc4 (next release)
+References: bsc#1087082 CVE-2018-3639
+
+The macro is not type safe and I did look for why that "g" constraint for
+the asm doesn't work: it's because the asm is more fundamentally wrong.
+
+It does
+
+ movl %[val], %%eax
+
+but "val" isn't a 32-bit value, so then gcc will pass it in a register,
+and generate code like
+
+ movl %rsi, %eax
+
+and gas will complain about a nonsensical 'mov' instruction (it's moving a
+64-bit register to a 32-bit one).
+
+Passing it through memory will just hide the real bug - gcc still thinks
+the memory location is 64-bit, but the "movl" will only load the first 32
+bits and it all happens to work because x86 is little-endian.
+
+Convert it to a type safe inline function with a little trick which hands
+the feature into the ALTERNATIVE macro.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/nospec-branch.h | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -236,15 +236,16 @@ static inline void vmexit_fill_RSB(void)
+ : : "memory" );
+ }
+
+-#define alternative_msr_write(_msr, _val, _feature) \
+- asm volatile(ALTERNATIVE("", \
+- "movl %[msr], %%ecx\n\t" \
+- "movl %[val], %%eax\n\t" \
+- "movl $0, %%edx\n\t" \
+- "wrmsr", \
+- _feature) \
+- : : [msr] "i" (_msr), [val] "i" (_val) \
+- : "eax", "ecx", "edx", "memory")
++static __always_inline
++void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
++{
++ asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
++ : : "c" (msr),
++ "a" (val),
++ "d" (val >> 32),
++ [feature] "i" (feature)
++ : "memory");
++}
+
+ static inline void indirect_branch_prediction_barrier(void)
+ {
diff --git a/patches.suse/02-x86-bugs-concentrate-bug-detection-into-a-separate-function.patch b/patches.suse/02-x86-bugs-concentrate-bug-detection-into-a-separate-function.patch
new file mode 100644
index 0000000000..d29a67da89
--- /dev/null
+++ b/patches.suse/02-x86-bugs-concentrate-bug-detection-into-a-separate-function.patch
@@ -0,0 +1,67 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:16 -0400
+Subject: x86/bugs: Concentrate bug detection into a separate function
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Combine the various logic which goes through all those
+x86_cpu_id matching structures in one function.
+
+Suggested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/common.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -894,21 +894,27 @@ static const __initconst struct x86_cpu_
+ {}
+ };
+
+-static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
++static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
++ if (x86_match_cpu(cpu_no_speculation))
++ return;
++
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
++
+ if (x86_match_cpu(cpu_no_meltdown))
+- return false;
++ return;
+
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+- return false;
++ return;
+
+- return true;
++ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+ }
+
+ /*
+@@ -958,12 +964,7 @@ static void __init early_identify_cpu(st
+
+ setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+
+- if (!x86_match_cpu(cpu_no_speculation)) {
+- if (cpu_vulnerable_to_meltdown(c))
+- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+- }
++ cpu_set_bug_bits(c);
+
+ fpu__init_system(c);
+ }
diff --git a/patches.suse/03-x86-bugs-concentrate-bug-reporting-into-a-separate-function.patch b/patches.suse/03-x86-bugs-concentrate-bug-reporting-into-a-separate-function.patch
new file mode 100644
index 0000000000..089b6cbe31
--- /dev/null
+++ b/patches.suse/03-x86-bugs-concentrate-bug-reporting-into-a-separate-function.patch
@@ -0,0 +1,84 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:17 -0400
+Subject: x86/bugs: Concentrate bug reporting into a separate function
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Those SysFS functions have a similar preamble, as such make common
+code to handle them.
+
+Suggested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 46 +++++++++++++++++++++++++++++++--------------
+ 1 file changed, 32 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -332,30 +332,48 @@ retpoline_auto:
+ #undef pr_fmt
+
+ #ifdef CONFIG_SYSFS
+-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++
++ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
++ char *buf, unsigned int bug)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
++ if (!boot_cpu_has_bug(bug))
+ return sprintf(buf, "Not affected\n");
+- if (boot_cpu_has(X86_FEATURE_PTI))
+- return sprintf(buf, "Mitigation: PTI\n");
++
++ switch (bug) {
++ case X86_BUG_CPU_MELTDOWN:
++ if (boot_cpu_has(X86_FEATURE_PTI))
++ return sprintf(buf, "Mitigation: PTI\n");
++
++ break;
++
++ case X86_BUG_SPECTRE_V1:
++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++
++ case X86_BUG_SPECTRE_V2:
++ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
++ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++ spectre_v2_module_string());
++
++ default:
++ break;
++ }
++
+ return sprintf(buf, "Vulnerable\n");
+ }
+
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
++}
++
+ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+- return sprintf(buf, "Not affected\n");
+- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
+ }
+
+ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+- return sprintf(buf, "Not affected\n");
+-
+- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+- boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+- spectre_v2_module_string());
++ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+ }
+ #endif
diff --git a/patches.suse/04-x86-bugs-read-spec_ctrl-msr-during-boot-and-re-use-reserved-bits.patch b/patches.suse/04-x86-bugs-read-spec_ctrl-msr-during-boot-and-re-use-reserved-bits.patch
new file mode 100644
index 0000000000..ade80e8cbb
--- /dev/null
+++ b/patches.suse/04-x86-bugs-read-spec_ctrl-msr-during-boot-and-re-use-reserved-bits.patch
@@ -0,0 +1,133 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:18 -0400
+Subject: x86/bugs: Read SPEC_CTRL MSR during boot and re-use reserved bits
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+The 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to all
+the other bits as reserved. The Intel SDM glossary defines reserved as
+implementation specific - aka unknown.
+
+As such at bootup this must be taken it into account and proper masking for
+the bits in use applied.
+
+A copy of this document is available at
+https://bugzilla.kernel.org/show_bug.cgi?id=199511
+
+[ tglx: Made x86_spec_ctrl_base __ro_after_init ]
+
+Suggested-by: Jon Masters <jcm@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/nospec-branch.h | 24 ++++++++++++++++++++----
+ arch/x86/kernel/cpu/bugs.c | 28 ++++++++++++++++++++++++++++
+ 2 files changed, 48 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -214,6 +214,17 @@ enum spectre_v2_mitigation {
+ SPECTRE_V2_IBRS,
+ };
+
++/*
++ * The Intel specification for the SPEC_CTRL MSR requires that we
++ * preserve any already set reserved bits at boot time (e.g. for
++ * future additions that this kernel is not currently aware of).
++ * We then set any additional mitigation bits that we want
++ * ourselves and always use this as the base for SPEC_CTRL.
++ * We also use this when handling guest entry/exit as below.
++ */
++extern void x86_spec_ctrl_set(u64);
++extern u64 x86_spec_ctrl_get_default(void);
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+@@ -249,8 +260,9 @@ void alternative_msr_write(unsigned int
+
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+- alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
+- X86_FEATURE_USE_IBPB);
++ u64 val = PRED_CMD_IBPB;
++
++ alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+ }
+
+ /*
+@@ -297,14 +309,18 @@ static inline void unrestrict_branch_spe
+ */
+ #define firmware_restrict_branch_speculation_start() \
+ do { \
++ u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS; \
++ \
+ preempt_disable(); \
+- alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
++ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ X86_FEATURE_USE_IBRS_FW); \
+ } while (0)
+
+ #define firmware_restrict_branch_speculation_end() \
+ do { \
+- alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
++ u64 val = x86_spec_ctrl_get_default(); \
++ \
++ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ X86_FEATURE_USE_IBRS_FW); \
+ preempt_enable(); \
+ } while (0)
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -27,6 +27,12 @@
+
+ static void __init spectre_v2_select_mitigation(void);
+
++/*
++ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
++ * writes to SPEC_CTRL contain whatever reserved bits have been set.
++ */
++static u64 __ro_after_init x86_spec_ctrl_base;
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -36,6 +42,13 @@ void __init check_bugs(void)
+ print_cpu_info(&boot_cpu_data);
+ }
+
++ /*
++ * Read the SPEC_CTRL MSR to account for reserved bits which may
++ * have unknown values.
++ */
++ if (boot_cpu_has(X86_FEATURE_IBRS))
++ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++
+ /* Select the proper spectre mitigation before patching alternatives */
+ spectre_v2_select_mitigation();
+
+@@ -96,6 +109,21 @@ static const char *spectre_v2_strings[]
+
+ static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
++void x86_spec_ctrl_set(u64 val)
++{
++ if (val & ~SPEC_CTRL_IBRS)
++ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
++ else
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
++
++u64 x86_spec_ctrl_get_default(void)
++{
++ return x86_spec_ctrl_base;
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
diff --git a/patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch b/patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch
new file mode 100644
index 0000000000..0df8c82a10
--- /dev/null
+++ b/patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch
@@ -0,0 +1,123 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:19 -0400
+Subject: x86/bugs, KVM: Support the combination of guest and host IBRS
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+A guest may modify the SPEC_CTRL MSR from the value used by the
+kernel. Since the kernel doesn't use IBRS, this means a value of zero is
+what is needed in the host.
+
+But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf refers to
+the other bits as reserved so the kernel should respect the boot time
+SPEC_CTRL value and use that.
+
+This allows to deal with future extensions to the SPEC_CTRL interface if
+any at all.
+
+Note: This uses wrmsrl() instead of native_wrmsl(). I does not make any
+difference as paravirt will over-write the callq *0xfff.. with the wrmsrl
+assembler code.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/nospec-branch.h | 10 ++++++++++
+ arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++++++++
+ arch/x86/kvm/svm.c | 6 ++----
+ arch/x86/kvm/vmx.c | 6 ++----
+ 4 files changed, 32 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -225,6 +225,16 @@ enum spectre_v2_mitigation {
+ extern void x86_spec_ctrl_set(u64);
+ extern u64 x86_spec_ctrl_get_default(void);
+
++/*
++ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
++ * the guest has, while on VMEXIT we restore the host view. This
++ * would be easier if SPEC_CTRL were architecturally maskable or
++ * shadowable for guests but this is not (currently) the case.
++ * Takes the guest view of SPEC_CTRL MSR as a parameter.
++ */
++extern void x86_spec_ctrl_set_guest(u64);
++extern void x86_spec_ctrl_restore_host(u64);
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -124,6 +124,24 @@ u64 x86_spec_ctrl_get_default(void)
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
++void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
++{
++ if (!boot_cpu_has(X86_FEATURE_IBRS))
++ return;
++ if (x86_spec_ctrl_base != guest_spec_ctrl)
++ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
++
++void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
++{
++ if (!boot_cpu_has(X86_FEATURE_IBRS))
++ return;
++ if (x86_spec_ctrl_base != guest_spec_ctrl)
++ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++}
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5355,8 +5355,7 @@ static void svm_vcpu_run(struct kvm_vcpu
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+- if (svm->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
++ x86_spec_ctrl_set_guest(svm->spec_ctrl);
+
+ asm volatile (
+ "push %%" _ASM_BP "; \n\t"
+@@ -5468,8 +5467,7 @@ static void svm_vcpu_run(struct kvm_vcpu
+ if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- if (svm->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
++ x86_spec_ctrl_restore_host(svm->spec_ctrl);
+
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9463,8 +9463,7 @@ static void __noclone vmx_vcpu_run(struc
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+- if (vmx->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
++ x86_spec_ctrl_set_guest(vmx->spec_ctrl);
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
+ asm(
+@@ -9602,8 +9601,7 @@ static void __noclone vmx_vcpu_run(struc
+ if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+- if (vmx->spec_ctrl)
+- native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
++ x86_spec_ctrl_restore_host(vmx->spec_ctrl);
+
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
diff --git a/patches.suse/06-x86-bugs-expose-sys-spec_store_bypass.patch b/patches.suse/06-x86-bugs-expose-sys-spec_store_bypass.patch
new file mode 100644
index 0000000000..10b68e0bf5
--- /dev/null
+++ b/patches.suse/06-x86-bugs-expose-sys-spec_store_bypass.patch
@@ -0,0 +1,130 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:20 -0400
+Subject: x86/bugs: Expose /sys/../spec_store_bypass
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Add the sysfs file for the new vulerability. It does not do much except
+show the words 'Vulnerable' for recent x86 cores.
+
+Intel cores prior to family 6 are known not to be vulnerable, and so are
+some Atoms and some Xeon Phi.
+
+It assumes that older Cyrix, Centaur, etc. cores are immune.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 1
+ arch/x86/include/asm/cpufeatures.h | 1
+ arch/x86/kernel/cpu/bugs.c | 5 ++++
+ arch/x86/kernel/cpu/common.c | 23 +++++++++++++++++++++
+ drivers/base/cpu.c | 8 +++++++
+ include/linux/cpu.h | 2 +
+ 6 files changed, 40 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -360,4 +360,5 @@
+ #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+ #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
++#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -422,4 +422,9 @@ ssize_t cpu_show_spectre_v2(struct devic
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+ }
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
++}
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -894,10 +894,33 @@ static const __initconst struct x86_cpu_
+ {}
+ };
+
++static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
++ { X86_VENDOR_CENTAUR, 5, },
++ { X86_VENDOR_INTEL, 5, },
++ { X86_VENDOR_NSC, 5, },
++ { X86_VENDOR_ANY, 4, },
++ {}
++};
++
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
++ if (!x86_match_cpu(cpu_no_spec_store_bypass))
++ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
++
+ if (x86_match_cpu(cpu_no_speculation))
+ return;
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -378,6 +378,7 @@ What: /sys/devices/system/cpu/vulnerabi
+ /sys/devices/system/cpu/vulnerabilities/meltdown
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
++ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -521,14 +521,22 @@ ssize_t __weak cpu_show_spectre_v2(struc
+ return sprintf(buf, "Not affected\n");
+ }
+
++ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
++static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+ &dev_attr_spectre_v1.attr,
+ &dev_attr_spectre_v2.attr,
++ &dev_attr_spec_store_bypass.attr,
+ NULL
+ };
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -52,6 +52,8 @@ extern ssize_t cpu_show_spectre_v1(struc
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/patches.suse/07-x86-cpufeatures-add-x86_feature_rds.patch b/patches.suse/07-x86-cpufeatures-add-x86_feature_rds.patch
new file mode 100644
index 0000000000..80484156a5
--- /dev/null
+++ b/patches.suse/07-x86-cpufeatures-add-x86_feature_rds.patch
@@ -0,0 +1,29 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Sat, 28 Apr 2018 22:34:17 +0200
+Subject: x86/cpufeatures: Add X86_FEATURE_RDS
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Add the CPU feature bit CPUID.7.0.EDX[31] which indicates whether the CPU
+supports Reduced Data Speculation.
+
+[ tglx: Split it out from a later patch ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -331,6 +331,7 @@
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
++#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */
+
+ /*
+ * BUG word(s)
diff --git a/patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch b/patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch
new file mode 100644
index 0000000000..e70f65521f
--- /dev/null
+++ b/patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch
@@ -0,0 +1,258 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:21 -0400
+Subject: x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Contemporary high performance processors use a common industry-wide
+optimization known as "Speculative Store Bypass" in which loads from
+addresses to which a recent store has occurred may (speculatively) see an
+older value. Intel refers to this feature as "Memory Disambiguation" which
+is part of their "Smart Memory Access" capability.
+
+Memory Disambiguation can expose a cache side-channel attack against such
+speculatively read values. An attacker can create exploit code that allows
+them to read memory outside of a sandbox environment (for example,
+malicious JavaScript in a web page), or to perform more complex attacks
+against code running within the same privilege level, e.g. via the stack.
+
+As a first step to mitigate against such attacks, provide two boot command
+line control knobs:
+
+ nospec_store_bypass_disable
+ spec_store_bypass_disable=[off,auto,on]
+
+By default affected x86 processors will power on with Speculative
+Store Bypass enabled. Hence the provided kernel parameters are written
+from the point of view of whether to enable a mitigation or not.
+The parameters are as follows:
+
+ - auto - Kernel detects whether your CPU model contains an implementation
+ of Speculative Store Bypass and picks the most appropriate
+ mitigation.
+
+ - on - disable Speculative Store Bypass
+ - off - enable Speculative Store Bypass
+
+[ tglx: Reordered the checks so that the whole evaluation is not done
+ when the CPU does not support RDS ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 33 +++++++
+ arch/x86/include/asm/cpufeatures.h | 1
+ arch/x86/include/asm/nospec-branch.h | 6 +
+ arch/x86/kernel/cpu/bugs.c | 103 ++++++++++++++++++++++++
+ 4 files changed, 143 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -215,6 +215,7 @@
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_USE_IBRS ( 7*32+23) /* "" Use IBRS for Spectre v2 safety */
++#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+24) /* "" Disable Speculative Store Bypass. */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -235,6 +235,12 @@ extern u64 x86_spec_ctrl_get_default(voi
+ extern void x86_spec_ctrl_set_guest(u64);
+ extern void x86_spec_ctrl_restore_host(u64);
+
++/* The Speculative Store Bypass disable variants */
++enum ssb_mitigation {
++ SPEC_STORE_BYPASS_NONE,
++ SPEC_STORE_BYPASS_DISABLE,
++};
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -26,6 +26,7 @@
+ #include <asm/intel-family.h>
+
+ static void __init spectre_v2_select_mitigation(void);
++static void __init ssb_select_mitigation(void);
+
+ /*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+@@ -52,6 +53,12 @@ void __init check_bugs(void)
+ /* Select the proper spectre mitigation before patching alternatives */
+ spectre_v2_select_mitigation();
+
++ /*
++ * Select proper mitigation for any exposure to the Speculative Store
++ * Bypass vulnerability.
++ */
++ ssb_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -376,6 +383,99 @@ retpoline_auto:
+ }
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
++
++static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
++
++/* The kernel command line selection */
++enum ssb_mitigation_cmd {
++ SPEC_STORE_BYPASS_CMD_NONE,
++ SPEC_STORE_BYPASS_CMD_AUTO,
++ SPEC_STORE_BYPASS_CMD_ON,
++};
++
++static const char *ssb_strings[] = {
++ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
++ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
++};
++
++static const struct {
++ const char *option;
++ enum ssb_mitigation_cmd cmd;
++} ssb_mitigation_options[] = {
++ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
++ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
++ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++};
++
++static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
++{
++ enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
++ char arg[20];
++ int ret, i;
++
++ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
++ return SPEC_STORE_BYPASS_CMD_NONE;
++ } else {
++ ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
++ arg, sizeof(arg));
++ if (ret < 0)
++ return SPEC_STORE_BYPASS_CMD_AUTO;
++
++ for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
++ if (!match_option(arg, ret, ssb_mitigation_options[i].option))
++ continue;
++
++ cmd = ssb_mitigation_options[i].cmd;
++ break;
++ }
++
++ if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
++ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
++ return SPEC_STORE_BYPASS_CMD_AUTO;
++ }
++ }
++
++ return cmd;
++}
++
++static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
++{
++ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
++ enum ssb_mitigation_cmd cmd;
++
++ if (!boot_cpu_has(X86_FEATURE_RDS))
++ return mode;
++
++ cmd = ssb_parse_cmdline();
++ if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
++ (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
++ cmd == SPEC_STORE_BYPASS_CMD_AUTO))
++ return mode;
++
++ switch (cmd) {
++ case SPEC_STORE_BYPASS_CMD_AUTO:
++ case SPEC_STORE_BYPASS_CMD_ON:
++ mode = SPEC_STORE_BYPASS_DISABLE;
++ break;
++ case SPEC_STORE_BYPASS_CMD_NONE:
++ break;
++ }
++
++ if (mode != SPEC_STORE_BYPASS_NONE)
++ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
++ return mode;
++}
++
++static void ssb_select_mitigation()
++{
++ ssb_mode = __ssb_select_mitigation();
++
++ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
++ pr_info("%s\n", ssb_strings[ssb_mode]);
++}
++
++#undef pr_fmt
+
+ #ifdef CONFIG_SYSFS
+
+@@ -401,6 +501,9 @@ ssize_t cpu_show_common(struct device *d
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ spectre_v2_module_string());
+
++ case X86_BUG_SPEC_STORE_BYPASS:
++ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
++
+ default:
+ break;
+ }
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2591,6 +2591,9 @@
+ allow data leaks with this option, which is equivalent
+ to spectre_v2=off.
+
++ nospec_store_bypass_disable
++ [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
++
+ noxsave [BUGS=X86] Disables x86 extended register state save
+ and restore using xsave. The kernel will fallback to
+ enabling legacy floating-point and sse state.
+@@ -3904,6 +3907,36 @@
+ Not specifying this option is equivalent to
+ spectre_v2=auto.
+
++ spec_store_bypass_disable=
++ [HW] Control Speculative Store Bypass (SSB) Disable mitigation
++ (Speculative Store Bypass vulnerability)
++
++ Certain CPUs are vulnerable to an exploit against a
++ a common industry wide performance optimization known
++ as "Speculative Store Bypass" in which recent stores
++ to the same memory location may not be observed by
++ later loads during speculative execution. The idea
++ is that such stores are unlikely and that they can
++ be detected prior to instruction retirement at the
++ end of a particular speculation execution window.
++
++ In vulnerable processors, the speculatively forwarded
++ store can be used in a cache side channel attack, for
++ example to read memory to which the attacker does not
++ directly have access (e.g. inside sandboxed code).
++
++ This parameter controls whether the Speculative Store
++ Bypass optimization is used.
++
++ on - Unconditionally disable Speculative Store Bypass
++ off - Unconditionally enable Speculative Store Bypass
++ auto - Kernel detects whether the CPU model contains an
++ implementation of Speculative Store Bypass and
++ picks the most appropriate mitigation
++
++ Not specifying this option is equivalent to
++ spec_store_bypass_disable=auto.
++
+ spia_io_base= [HW,MTD]
+ spia_fio_base=
+ spia_pedr=
diff --git a/patches.suse/09-x86-bugs-intel-set-proper-cpu-features-and-setup-rds.patch b/patches.suse/09-x86-bugs-intel-set-proper-cpu-features-and-setup-rds.patch
new file mode 100644
index 0000000000..706cca3f67
--- /dev/null
+++ b/patches.suse/09-x86-bugs-intel-set-proper-cpu-features-and-setup-rds.patch
@@ -0,0 +1,167 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:22 -0400
+Subject: x86/bugs/intel: Set proper CPU features and setup RDS
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Intel CPUs expose methods to:
+
+ - Detect whether RDS capability is available via CPUID.7.0.EDX[31],
+
+ - The SPEC_CTRL MSR(0x48), bit 2 set to enable RDS.
+
+ - MSR_IA32_ARCH_CAPABILITIES, Bit(4) no need to enable RRS.
+
+With that in mind if spec_store_bypass_disable=[auto,on] is selected set at
+boot-time the SPEC_CTRL MSR to enable RDS if the platform requires it.
+
+Note that this does not fix the KVM case where the SPEC_CTRL is exposed to
+guests which can muck with it, see patch titled :
+ KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS.
+
+And for the firmware (IBRS to be set), see patch titled:
+ x86/spectre_v2: Read SPEC_CTRL MSR during boot and re-use reserved bits
+
+[ tglx: Distangled it from the intel implementation and kept the call order ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/msr-index.h | 6 ++++++
+ arch/x86/kernel/cpu/bugs.c | 30 ++++++++++++++++++++++++++++--
+ arch/x86/kernel/cpu/common.c | 10 ++++++----
+ arch/x86/kernel/cpu/cpu.h | 2 ++
+ arch/x86/kernel/cpu/intel.c | 1 +
+ 5 files changed, 43 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -41,6 +41,7 @@
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
++#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+@@ -67,6 +68,11 @@
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+ #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
++#define ARCH_CAP_RDS_NO (1 << 4) /*
++ * Not susceptible to Speculative Store Bypass
++ * attack, so no Reduced Data Speculation control
++ * required.
++ */
+
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+ #define MSR_IA32_BBL_CR_CTL3 0x0000011e
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -118,7 +118,7 @@ static enum spectre_v2_mitigation spectr
+
+ void x86_spec_ctrl_set(u64 val)
+ {
+- if (val & ~SPEC_CTRL_IBRS)
++ if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS))
+ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
+ else
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
+@@ -462,8 +462,28 @@ static enum ssb_mitigation_cmd __init __
+ break;
+ }
+
+- if (mode != SPEC_STORE_BYPASS_NONE)
++ /*
++ * We have three CPU feature flags that are in play here:
++ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
++ * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
++ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
++ */
++ if (mode != SPEC_STORE_BYPASS_NONE) {
+ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
++ /*
++ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
++ * a completely different MSR and bit dependent on family.
++ */
++ switch (boot_cpu_data.x86_vendor) {
++ case X86_VENDOR_INTEL:
++ x86_spec_ctrl_base |= SPEC_CTRL_RDS;
++ x86_spec_ctrl_set(SPEC_CTRL_RDS);
++ break;
++ case X86_VENDOR_AMD:
++ break;
++ }
++ }
++
+ return mode;
+ }
+
+@@ -477,6 +497,12 @@ static void ssb_select_mitigation()
+
+ #undef pr_fmt
+
++void x86_spec_ctrl_setup_ap(void)
++{
++ if (boot_cpu_has(X86_FEATURE_IBRS))
++ x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS));
++}
++
+ #ifdef CONFIG_SYSFS
+
+ ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -918,7 +918,11 @@ static void __init cpu_set_bug_bits(stru
+ {
+ u64 ia32_cap = 0;
+
+- if (!x86_match_cpu(cpu_no_spec_store_bypass))
++ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
++ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
++
++ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
++ !(ia32_cap & ARCH_CAP_RDS_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_speculation))
+@@ -930,9 +934,6 @@ static void __init cpu_set_bug_bits(stru
+ if (x86_match_cpu(cpu_no_meltdown))
+ return;
+
+- if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+-
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+ return;
+@@ -1338,6 +1339,7 @@ void identify_secondary_cpu(struct cpuin
+ #endif
+ mtrr_ap_init();
+ validate_apic_and_package_id(c);
++ x86_spec_ctrl_setup_ap();
+ }
+
+ static __init int setup_noclflush(char *arg)
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -46,4 +46,6 @@ extern const struct cpu_dev *const __x86
+
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
++extern void x86_spec_ctrl_setup_ap(void);
++
+ #endif /* ARCH_X86_CPU_H */
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -188,6 +188,7 @@ static void early_init_intel(struct cpui
+ setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
++ setup_clear_cpu_cap(X86_FEATURE_RDS);
+ }
+
+ /*
diff --git a/patches.suse/10-x86-bugs-whitelist-allowed-spec_ctrl-msr-values.patch b/patches.suse/10-x86-bugs-whitelist-allowed-spec_ctrl-msr-values.patch
new file mode 100644
index 0000000000..1eb42379e8
--- /dev/null
+++ b/patches.suse/10-x86-bugs-whitelist-allowed-spec_ctrl-msr-values.patch
@@ -0,0 +1,63 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:23 -0400
+Subject: x86/bugs: Whitelist allowed SPEC_CTRL MSR values
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Intel and AMD SPEC_CTRL (0x48) MSR semantics may differ in the
+future (or in fact use different MSRs for the same functionality).
+
+As such a run-time mechanism is required to whitelist the appropriate MSR
+values.
+
+[ tglx: Made the variable __ro_after_init ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -34,6 +34,12 @@ static void __init ssb_select_mitigation
+ */
+ static u64 __ro_after_init x86_spec_ctrl_base;
+
++/*
++ * The vendor and possibly platform specific bits which can be modified in
++ * x86_spec_ctrl_base.
++ */
++static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -118,7 +124,7 @@ static enum spectre_v2_mitigation spectr
+
+ void x86_spec_ctrl_set(u64 val)
+ {
+- if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS))
++ if (val & x86_spec_ctrl_mask)
+ WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
+ else
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
+@@ -477,6 +483,7 @@ static enum ssb_mitigation_cmd __init __
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ x86_spec_ctrl_base |= SPEC_CTRL_RDS;
++ x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
+ x86_spec_ctrl_set(SPEC_CTRL_RDS);
+ break;
+ case X86_VENDOR_AMD:
+@@ -500,7 +507,7 @@ static void ssb_select_mitigation()
+ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+- x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS));
++ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+ }
+
+ #ifdef CONFIG_SYSFS
diff --git a/patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch b/patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch
new file mode 100644
index 0000000000..f4e81449ab
--- /dev/null
+++ b/patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch
@@ -0,0 +1,184 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:24 -0400
+Subject: x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+AMD does not need the Speculative Store Bypass mitigation to be enabled.
+
+The parameters for this are already available and can be done via MSR
+C001_1020. Each family uses a different bit in that MSR for this.
+
+[ tglx: Expose the bit mask via a variable and move the actual MSR fiddling
+ into the bugs code as that's the right thing to do and also required
+ to prepare for dynamic enable/disable ]
+
+Suggested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/include/asm/nospec-branch.h | 4 ++++
+ arch/x86/kernel/cpu/amd.c | 26 ++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++++++++++++-
+ arch/x86/kernel/cpu/common.c | 4 ++++
+ 5 files changed, 61 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -216,6 +216,7 @@
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_USE_IBRS ( 7*32+23) /* "" Use IBRS for Spectre v2 safety */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+24) /* "" Disable Speculative Store Bypass. */
++#define X86_FEATURE_AMD_RDS (7*32+25) /* "" AMD RDS implementation */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -241,6 +241,10 @@ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_DISABLE,
+ };
+
++/* AMD specific Speculative Store Bypass MSR data */
++extern u64 x86_amd_ls_cfg_base;
++extern u64 x86_amd_ls_cfg_rds_mask;
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -10,6 +10,7 @@
+ #include <asm/processor.h>
+ #include <asm/apic.h>
+ #include <asm/cpu.h>
++#include <asm/nospec-branch.h>
+ #include <asm/smp.h>
+ #include <asm/pci-direct.h>
+ #include <asm/delay.h>
+@@ -553,6 +554,26 @@ static void bsp_init_amd(struct cpuinfo_
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+ nodes_per_socket = ((value >> 3) & 7) + 1;
+ }
++
++ if (c->x86 >= 0x15 && c->x86 <= 0x17) {
++ unsigned int bit;
++
++ switch (c->x86) {
++ case 0x15: bit = 54; break;
++ case 0x16: bit = 33; break;
++ case 0x17: bit = 10; break;
++ default: return;
++ }
++ /*
++ * Try to cache the base value so further operations can
++ * avoid RMW. If that faults, do not enable RDS.
++ */
++ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
++ setup_force_cpu_cap(X86_FEATURE_RDS);
++ setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
++ x86_amd_ls_cfg_rds_mask = 1ULL << bit;
++ }
++ }
+ }
+
+ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
+@@ -886,6 +907,11 @@ static void init_amd(struct cpuinfo_x86
+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++
++ if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
++ set_cpu_cap(c, X86_FEATURE_RDS);
++ set_cpu_cap(c, X86_FEATURE_AMD_RDS);
++ }
+ }
+
+ #ifdef CONFIG_X86_32
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -40,6 +40,13 @@ static u64 __ro_after_init x86_spec_ctrl
+ */
+ static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
+
++/*
++ * AMD specific MSR info for Speculative Store Bypass control.
++ * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
++ */
++u64 __ro_after_init x86_amd_ls_cfg_base;
++u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+@@ -51,7 +58,8 @@ void __init check_bugs(void)
+
+ /*
+ * Read the SPEC_CTRL MSR to account for reserved bits which may
+- * have unknown values.
++ * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
++ * init code as it is not enumerated and depends on the family.
+ */
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+@@ -155,6 +163,14 @@ void x86_spec_ctrl_restore_host(u64 gues
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
++static void x86_amd_rds_enable(void)
++{
++ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
++
++ if (boot_cpu_has(X86_FEATURE_AMD_RDS))
++ wrmsrl(MSR_AMD64_LS_CFG, msrval);
++}
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+@@ -461,6 +477,11 @@ static enum ssb_mitigation_cmd __init __
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
++ /*
++ * AMD platforms by default don't need SSB mitigation.
++ */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
++ break;
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+ break;
+@@ -487,6 +508,7 @@ static enum ssb_mitigation_cmd __init __
+ x86_spec_ctrl_set(SPEC_CTRL_RDS);
+ break;
+ case X86_VENDOR_AMD:
++ x86_amd_rds_enable();
+ break;
+ }
+ }
+@@ -508,6 +530,9 @@ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
++
++ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
++ x86_amd_rds_enable();
+ }
+
+ #ifdef CONFIG_SYSFS
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -910,6 +910,10 @@ static const __initconst struct x86_cpu_
+ { X86_VENDOR_CENTAUR, 5, },
+ { X86_VENDOR_INTEL, 5, },
+ { X86_VENDOR_NSC, 5, },
++ { X86_VENDOR_AMD, 0x12, },
++ { X86_VENDOR_AMD, 0x11, },
++ { X86_VENDOR_AMD, 0x10, },
++ { X86_VENDOR_AMD, 0xf, },
+ { X86_VENDOR_ANY, 4, },
+ {}
+ };
diff --git a/patches.suse/12-x86-kvm-vmx-expose-spec_ctrl-bit2-to-the-guest.patch b/patches.suse/12-x86-kvm-vmx-expose-spec_ctrl-bit2-to-the-guest.patch
new file mode 100644
index 0000000000..59d33f1dde
--- /dev/null
+++ b/patches.suse/12-x86-kvm-vmx-expose-spec_ctrl-bit2-to-the-guest.patch
@@ -0,0 +1,61 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 25 Apr 2018 22:04:25 -0400
+Subject: x86/KVM/VMX: Expose SPEC_CTRL Bit(2) to the guest
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Expose the CPUID.7.EDX[31] bit to the guest, and also guard against various
+combinations of SPEC_CTRL MSR values.
+
+The handling of the MSR (to take into account the host value of SPEC_CTRL
+Bit(2)) is taken care of in patch:
+
+ KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kvm/cpuid.c | 2 +-
+ arch/x86/kvm/vmx.c | 8 +++++---
+ 2 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -394,7 +394,7 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
++ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(RDS) |
+ F(ARCH_CAPABILITIES);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3297,7 +3297,8 @@ static int vmx_get_msr(struct kvm_vcpu *
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
+- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
++ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
++ !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
+ return 1;
+
+ msr_info->data = to_vmx(vcpu)->spec_ctrl;
+@@ -3418,11 +3419,12 @@ static int vmx_set_msr(struct kvm_vcpu *
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
+- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
++ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
++ !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
++ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
+ return 1;
+
+ vmx->spec_ctrl = data;
diff --git a/patches.suse/13-x86-speculation-create-spec-ctrl-h-to-avoid-include-hell.patch b/patches.suse/13-x86-speculation-create-spec-ctrl-h-to-avoid-include-hell.patch
new file mode 100644
index 0000000000..5cd9be6da0
--- /dev/null
+++ b/patches.suse/13-x86-speculation-create-spec-ctrl-h-to-avoid-include-hell.patch
@@ -0,0 +1,121 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:01:37 +0200
+Subject: x86/speculation: Create spec-ctrl.h to avoid include hell
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Having everything in nospec-branch.h creates a hell of dependencies when
+adding the prctl based switching mechanism. Move everything which is not
+required in nospec-branch.h to spec-ctrl.h and fix up the includes in the
+relevant files.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/nospec-branch.h | 14 --------------
+ arch/x86/include/asm/spec-ctrl.h | 21 +++++++++++++++++++++
+ arch/x86/kernel/cpu/amd.c | 2 +-
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ arch/x86/kvm/svm.c | 2 +-
+ arch/x86/kvm/vmx.c | 2 +-
+ 6 files changed, 25 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -225,26 +225,12 @@ enum spectre_v2_mitigation {
+ extern void x86_spec_ctrl_set(u64);
+ extern u64 x86_spec_ctrl_get_default(void);
+
+-/*
+- * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+- * the guest has, while on VMEXIT we restore the host view. This
+- * would be easier if SPEC_CTRL were architecturally maskable or
+- * shadowable for guests but this is not (currently) the case.
+- * Takes the guest view of SPEC_CTRL MSR as a parameter.
+- */
+-extern void x86_spec_ctrl_set_guest(u64);
+-extern void x86_spec_ctrl_restore_host(u64);
+-
+ /* The Speculative Store Bypass disable variants */
+ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
+ };
+
+-/* AMD specific Speculative Store Bypass MSR data */
+-extern u64 x86_amd_ls_cfg_base;
+-extern u64 x86_amd_ls_cfg_rds_mask;
+-
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+
+--- /dev/null
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_X86_SPECCTRL_H_
++#define _ASM_X86_SPECCTRL_H_
++
++#include <asm/nospec-branch.h>
++
++/*
++ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
++ * the guest has, while on VMEXIT we restore the host view. This
++ * would be easier if SPEC_CTRL were architecturally maskable or
++ * shadowable for guests but this is not (currently) the case.
++ * Takes the guest view of SPEC_CTRL MSR as a parameter.
++ */
++extern void x86_spec_ctrl_set_guest(u64);
++extern void x86_spec_ctrl_restore_host(u64);
++
++/* AMD specific Speculative Store Bypass MSR data */
++extern u64 x86_amd_ls_cfg_base;
++extern u64 x86_amd_ls_cfg_rds_mask;
++
++#endif
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -10,7 +10,7 @@
+ #include <asm/processor.h>
+ #include <asm/apic.h>
+ #include <asm/cpu.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+ #include <asm/smp.h>
+ #include <asm/pci-direct.h>
+ #include <asm/delay.h>
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -12,7 +12,7 @@
+ #include <linux/cpu.h>
+ #include <linux/module.h>
+
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+ #include <asm/cmdline.h>
+ #include <asm/bugs.h>
+ #include <asm/processor.h>
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -50,7 +50,7 @@
+ #include <asm/kvm_para.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/microcode.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+
+ #include <asm/virtext.h>
+ #include "trace.h"
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -52,7 +52,7 @@
+ #include <asm/irq_remapping.h>
+ #include <asm/mmu_context.h>
+ #include <asm/microcode.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+
+ #include "trace.h"
+ #include "pmu.h"
diff --git a/patches.suse/14-prctl-add-speculation-control-prctls.patch b/patches.suse/14-prctl-add-speculation-control-prctls.patch
new file mode 100644
index 0000000000..d7da451090
--- /dev/null
+++ b/patches.suse/14-prctl-add-speculation-control-prctls.patch
@@ -0,0 +1,224 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:20:11 +0200
+Subject: prctl: Add speculation control prctls
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Add two new prctls to control aspects of speculation related vulnerabilities
+and their mitigations to provide finer grained control over performance
+impacting mitigations.
+
+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+which is selected with arg2 of prctl(2). The return value uses bit 0-2 with
+the following meaning:
+
+Bit Define Description
+0 PR_SPEC_PRCTL Mitigation can be controlled per task by
+ PR_SET_SPECULATION_CTRL
+1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
+ disabled
+2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
+ enabled
+
+If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+misfeature will fail.
+
+PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
+is selected by arg2 of prctl(2) per task. arg3 is used to hand in the
+control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
+
+The common return values are:
+
+EINVAL prctl is not implemented by the architecture or the unused prctl()
+ arguments are not 0
+ENODEV arg2 is selecting a not supported speculation misfeature
+
+PR_SET_SPECULATION_CTRL has these additional return values:
+
+ERANGE arg3 is incorrect, i.e. it's not either PR_SPEC_ENABLE or PR_SPEC_DISABLE
+ENXIO prctl control of the selected speculation misfeature is disabled
+
+The first supported controllable speculation misfeature is
+PR_SPEC_STORE_BYPASS. Add the define so this can be shared between
+architectures.
+
+Based on an initial patch from Tim Chen and mostly rewritten.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/userspace-api/spec_ctrl.rst | 86 ++++++++++++++++++++++++++++++
+ include/linux/nospec.h | 5 +
+ include/uapi/linux/prctl.h | 11 +++
+ kernel/sys.c | 22 +++++++
+ 4 files changed, 124 insertions(+)
+
+--- /dev/null
++++ b/Documentation/userspace-api/spec_ctrl.rst
+@@ -0,0 +1,86 @@
++===================
++Speculation Control
++===================
++
++Quite some CPUs have speculation related misfeatures which are in fact
++vulnerabilities causing data leaks in various forms even across privilege
++domains.
++
++The kernel provides mitigation for such vulnerabilities in various
++forms. Some of these mitigations are compile time configurable and some on
++the kernel command line.
++
++There is also a class of mitigations which are very expensive, but they can
++be restricted to a certain set of processes or tasks in controlled
++environments. The mechanism to control these mitigations is via
++:manpage:`prctl(2)`.
++
++There are two prctl options which are related to this:
++
++ * PR_GET_SPECULATION_CTRL
++
++ * PR_SET_SPECULATION_CTRL
++
++PR_GET_SPECULATION_CTRL
++-----------------------
++
++PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
++which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
++the following meaning:
++
++==== ================ ===================================================
++Bit Define Description
++==== ================ ===================================================
++0 PR_SPEC_PRCTL Mitigation can be controlled per task by
++ PR_SET_SPECULATION_CTRL
++1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
++ disabled
++2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
++ enabled
++==== ================ ===================================================
++
++If all bits are 0 the CPU is not affected by the speculation misfeature.
++
++If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
++available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
++misfeature will fail.
++
++PR_SET_SPECULATION_CTRL
++-----------------------
++PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
++is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
++in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
++
++Common error codes
++------------------
++======= =================================================================
++Value Meaning
++======= =================================================================
++EINVAL The prctl is not implemented by the architecture or unused
++ prctl(2) arguments are not 0
++
++ENODEV arg2 is selecting a not supported speculation misfeature
++======= =================================================================
++
++PR_SET_SPECULATION_CTRL error codes
++-----------------------------------
++======= =================================================================
++Value Meaning
++======= =================================================================
++0 Success
++
++ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
++ PR_SPEC_DISABLE
++
++ENXIO Control of the selected speculation misfeature is not possible.
++ See PR_GET_SPECULATION_CTRL.
++======= =================================================================
++
++Speculation misfeature controls
++-------------------------------
++- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
++
++ Invocations:
++ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -75,4 +75,9 @@ static inline unsigned long array_index_
+ _i &= _mask; \
+ _i; \
+ })
++
++/* Speculation control prctl */
++int arch_prctl_spec_ctrl_get(unsigned long which);
++int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl);
++
+ #endif /* _LINUX_NOSPEC_H */
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -197,4 +197,15 @@ struct prctl_mm_map {
+ # define PR_CAP_AMBIENT_LOWER 3
+ # define PR_CAP_AMBIENT_CLEAR_ALL 4
+
++/* Per task speculation control */
++#define PR_GET_SPECULATION_CTRL 52
++#define PR_SET_SPECULATION_CTRL 53
++/* Speculation control variants */
++# define PR_SPEC_STORE_BYPASS 0
++/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
++# define PR_SPEC_NOT_AFFECTED 0
++# define PR_SPEC_PRCTL (1UL << 0)
++# define PR_SPEC_ENABLE (1UL << 1)
++# define PR_SPEC_DISABLE (1UL << 2)
++
+ #endif /* _LINUX_PRCTL_H */
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -60,6 +60,8 @@
+ #include <linux/uidgid.h>
+ #include <linux/cred.h>
+
++#include <linux/nospec.h>
++
+ #include <linux/kmsg_dump.h>
+ /* Move somewhere else to avoid recompiling? */
+ #include <generated/utsrelease.h>
+@@ -2093,6 +2095,16 @@ static int propagate_has_child_subreaper
+ return 1;
+ }
+
++int __weak arch_prctl_spec_ctrl_get(unsigned long which)
++{
++ return -EINVAL;
++}
++
++int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++{
++ return -EINVAL;
++}
++
+ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ unsigned long, arg4, unsigned long, arg5)
+ {
+@@ -2295,6 +2307,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
+ case PR_GET_FP_MODE:
+ error = GET_FP_MODE(me);
+ break;
++ case PR_GET_SPECULATION_CTRL:
++ if (arg3 || arg4 || arg5)
++ return -EINVAL;
++ error = arch_prctl_spec_ctrl_get(arg2);
++ break;
++ case PR_SET_SPECULATION_CTRL:
++ if (arg4 || arg5)
++ return -EINVAL;
++ error = arch_prctl_spec_ctrl_set(arg2, arg3);
++ break;
+ default:
+ error = -EINVAL;
+ break;
diff --git a/patches.suse/15-x86-process-allow-runtime-control-of-speculative-store-bypass.patch b/patches.suse/15-x86-process-allow-runtime-control-of-speculative-store-bypass.patch
new file mode 100644
index 0000000000..c5a4335245
--- /dev/null
+++ b/patches.suse/15-x86-process-allow-runtime-control-of-speculative-store-bypass.patch
@@ -0,0 +1,210 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:21:42 +0200
+Subject: x86/process: Allow runtime control of Speculative Store Bypass
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+The Speculative Store Bypass vulnerability can be mitigated with the
+Reduced Data Speculation (RDS) feature. To allow finer grained control of
+this eventually expensive mitigation a per task mitigation control is
+required.
+
+Add a new TIF_RDS flag and put it into the group of TIF flags which are
+evaluated for mismatch in switch_to(). If these bits differ in the previous
+and the next task, then the slow path function __switch_to_xtra() is
+invoked. Implement the TIF_RDS dependent mitigation control in the slow
+path.
+
+If the prctl for controlling Speculative Store Bypass is disabled or no
+task uses the prctl then there is no overhead in the switch_to() fast
+path.
+
+Update the KVM related speculation control functions to take TIF_RDS into
+account as well.
+
+Based on a patch from Tim Chen. Completely rewritten.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/msr-index.h | 3 ++-
+ arch/x86/include/asm/spec-ctrl.h | 17 +++++++++++++++++
+ arch/x86/include/asm/thread_info.h | 4 +++-
+ arch/x86/kernel/cpu/bugs.c | 26 +++++++++++++++++++++-----
+ arch/x86/kernel/process.c | 22 ++++++++++++++++++++++
+ 5 files changed, 65 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -41,7 +41,8 @@
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+-#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
++#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
++#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -2,6 +2,7 @@
+ #ifndef _ASM_X86_SPECCTRL_H_
+ #define _ASM_X86_SPECCTRL_H_
+
++#include <linux/thread_info.h>
+ #include <asm/nospec-branch.h>
+
+ /*
+@@ -18,4 +19,20 @@ extern void x86_spec_ctrl_restore_host(u
+ extern u64 x86_amd_ls_cfg_base;
+ extern u64 x86_amd_ls_cfg_rds_mask;
+
++/* The Intel SPEC CTRL MSR base value cache */
++extern u64 x86_spec_ctrl_base;
++
++static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
++{
++ BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
++ return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
++}
++
++static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
++{
++ return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
++}
++
++extern void speculative_store_bypass_update(void);
++
+ #endif
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -80,6 +80,7 @@ struct thread_info {
+ #define TIF_SIGPENDING 2 /* signal pending */
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
++#define TIF_RDS 5 /* Reduced data speculation */
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
+@@ -105,6 +106,7 @@ struct thread_info {
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
++#define _TIF_RDS (1 << TIF_RDS)
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+@@ -142,7 +144,7 @@ struct thread_info {
+
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW \
+- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
++ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
+
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -32,7 +32,7 @@ static void __init ssb_select_mitigation
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+-static u64 __ro_after_init x86_spec_ctrl_base;
++u64 __ro_after_init x86_spec_ctrl_base;
+
+ /*
+ * The vendor and possibly platform specific bits which can be modified in
+@@ -141,25 +141,41 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
+
+ u64 x86_spec_ctrl_get_default(void)
+ {
+- return x86_spec_ctrl_base;
++ u64 msrval = x86_spec_ctrl_base;
++
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ return msrval;
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+
+ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+ {
++ u64 host = x86_spec_ctrl_base;
++
+ if (!boot_cpu_has(X86_FEATURE_IBRS))
+ return;
+- if (x86_spec_ctrl_base != guest_spec_ctrl)
++
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++
++ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+ {
++ u64 host = x86_spec_ctrl_base;
++
+ if (!boot_cpu_has(X86_FEATURE_IBRS))
+ return;
+- if (x86_spec_ctrl_base != guest_spec_ctrl)
+- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++
++ if (host != guest_spec_ctrl)
++ wrmsrl(MSR_IA32_SPEC_CTRL, host);
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -38,6 +38,7 @@
+ #include <asm/switch_to.h>
+ #include <asm/desc.h>
+ #include <asm/prctl.h>
++#include <asm/spec-ctrl.h>
+
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+@@ -278,6 +279,24 @@ static inline void switch_to_bitmap(stru
+ }
+ }
+
++static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
++{
++ u64 msr;
++
++ if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
++ msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
++ wrmsrl(MSR_AMD64_LS_CFG, msr);
++ } else {
++ msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
++ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++ }
++}
++
++void speculative_store_bypass_update(void)
++{
++ __speculative_store_bypass_update(current_thread_info()->flags);
++}
++
+ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ struct tss_struct *tss)
+ {
+@@ -309,6 +328,9 @@ void __switch_to_xtra(struct task_struct
+
+ if ((tifp ^ tifn) & _TIF_NOCPUID)
+ set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
++
++ if ((tifp ^ tifn) & _TIF_RDS)
++ __speculative_store_bypass_update(tifn);
+ }
+
+ /*
diff --git a/patches.suse/16-x86-speculation-add-prctl-for-speculative-store-bypass-mitigation.patch b/patches.suse/16-x86-speculation-add-prctl-for-speculative-store-bypass-mitigation.patch
new file mode 100644
index 0000000000..16939e252b
--- /dev/null
+++ b/patches.suse/16-x86-speculation-add-prctl-for-speculative-store-bypass-mitigation.patch
@@ -0,0 +1,209 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 29 Apr 2018 15:26:40 +0200
+Subject: x86/speculation: Add prctl for Speculative Store Bypass mitigation
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Add prctl based control for Speculative Store Bypass mitigation and make it
+the default mitigation for Intel and AMD.
+
+Andi Kleen provided the following rationale (slightly redacted):
+
+ There are multiple levels of impact of Speculative Store Bypass:
+
+ 1) JITed sandbox.
+ It cannot invoke system calls, but can do PRIME+PROBE and may have call
+ interfaces to other code
+
+ 2) Native code process.
+ No protection inside the process at this level.
+
+ 3) Kernel.
+
+ 4) Between processes.
+
+ The prctl tries to protect against case (1) doing attacks.
+
+ If the untrusted code can do random system calls then control is already
+ lost in a much worse way. So there needs to be system call protection in
+ some way (using a JIT not allowing them or seccomp). Or rather if the
+ process can subvert its environment somehow to do the prctl it can already
+ execute arbitrary code, which is much worse than SSB.
+
+ To put it differently, the point of the prctl is to not allow JITed code
+ to read data it shouldn't read from its JITed sandbox. If it already has
+ escaped its sandbox then it can already read everything it wants in its
+ address space, and do much worse.
+
+ The ability to control Speculative Store Bypass allows to enable the
+ protection selectively without affecting overall system performance.
+
+Based on an initial patch from Tim Chen. Completely rewritten.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 6 +
+ arch/x86/include/asm/nospec-branch.h | 1
+ arch/x86/kernel/cpu/bugs.c | 83 +++++++++++++++++++++---
+ 3 files changed, 79 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -229,6 +229,7 @@ extern u64 x86_spec_ctrl_get_default(voi
+ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
++ SPEC_STORE_BYPASS_PRCTL,
+ };
+
+ extern char __indirect_thunk_start[];
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -11,6 +11,8 @@
+ #include <linux/utsname.h>
+ #include <linux/cpu.h>
+ #include <linux/module.h>
++#include <linux/nospec.h>
++#include <linux/prctl.h>
+
+ #include <asm/spec-ctrl.h>
+ #include <asm/cmdline.h>
+@@ -430,20 +432,23 @@ enum ssb_mitigation_cmd {
+ SPEC_STORE_BYPASS_CMD_NONE,
+ SPEC_STORE_BYPASS_CMD_AUTO,
+ SPEC_STORE_BYPASS_CMD_ON,
++ SPEC_STORE_BYPASS_CMD_PRCTL,
+ };
+
+ static const char *ssb_strings[] = {
+ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
+- [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled"
++ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
++ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
+ };
+
+ static const struct {
+ const char *option;
+ enum ssb_mitigation_cmd cmd;
+ } ssb_mitigation_options[] = {
+- { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+- { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+- { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
++ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
++ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
+ };
+
+ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+@@ -493,14 +498,15 @@ static enum ssb_mitigation_cmd __init __
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
+- /*
+- * AMD platforms by default don't need SSB mitigation.
+- */
+- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+- break;
++ /* Choose prctl as the default mode */
++ mode = SPEC_STORE_BYPASS_PRCTL;
++ break;
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+ break;
++ case SPEC_STORE_BYPASS_CMD_PRCTL:
++ mode = SPEC_STORE_BYPASS_PRCTL;
++ break;
+ case SPEC_STORE_BYPASS_CMD_NONE:
+ break;
+ }
+@@ -511,7 +517,7 @@ static enum ssb_mitigation_cmd __init __
+ * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+ */
+- if (mode != SPEC_STORE_BYPASS_NONE) {
++ if (mode == SPEC_STORE_BYPASS_DISABLE) {
+ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+ /*
+ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+@@ -542,6 +548,63 @@ static void ssb_select_mitigation()
+
+ #undef pr_fmt
+
++static int ssb_prctl_set(unsigned long ctrl)
++{
++ bool rds = !!test_tsk_thread_flag(current, TIF_RDS);
++
++ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
++ return -ENXIO;
++
++ if (ctrl == PR_SPEC_ENABLE)
++ clear_tsk_thread_flag(current, TIF_RDS);
++ else
++ set_tsk_thread_flag(current, TIF_RDS);
++
++ if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
++ speculative_store_bypass_update();
++
++ return 0;
++}
++
++static int ssb_prctl_get(void)
++{
++ switch (ssb_mode) {
++ case SPEC_STORE_BYPASS_DISABLE:
++ return PR_SPEC_DISABLE;
++ case SPEC_STORE_BYPASS_PRCTL:
++ if (test_tsk_thread_flag(current, TIF_RDS))
++ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
++ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
++ default:
++ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
++ return PR_SPEC_ENABLE;
++ return PR_SPEC_NOT_AFFECTED;
++ }
++}
++
++int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++{
++ if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
++ return -ERANGE;
++
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssb_prctl_set(ctrl);
++ default:
++ return -ENODEV;
++ }
++}
++
++int arch_prctl_spec_ctrl_get(unsigned long which)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssb_prctl_get();
++ default:
++ return -ENODEV;
++ }
++}
++
+ void x86_spec_ctrl_setup_ap(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_IBRS))
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3932,7 +3932,11 @@
+ off - Unconditionally enable Speculative Store Bypass
+ auto - Kernel detects whether the CPU model contains an
+ implementation of Speculative Store Bypass and
+- picks the most appropriate mitigation
++ picks the most appropriate mitigation.
++ prctl - Control Speculative Store Bypass per thread
++ via prctl. Speculative Store Bypass is enabled
++ for a process by default. The state of the control
++ is inherited on fork.
+
+ Not specifying this option is equivalent to
+ spec_store_bypass_disable=auto.
diff --git a/patches.suse/17-nospec-allow-getting-setting-on-non-current-task.patch b/patches.suse/17-nospec-allow-getting-setting-on-non-current-task.patch
new file mode 100644
index 0000000000..2bdab223b0
--- /dev/null
+++ b/patches.suse/17-nospec-allow-getting-setting-on-non-current-task.patch
@@ -0,0 +1,151 @@
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 1 May 2018 15:19:04 -0700
+Subject: nospec: Allow getting/setting on non-current task
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Adjust arch_prctl_get/set_spec_ctrl() to operate on tasks other than
+current.
+
+This is needed both for /proc/$pid/status queries and for seccomp (since
+thread-syncing can trigger seccomp in non-current threads).
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++-----------
+ include/linux/nospec.h | 7 +++++--
+ kernel/sys.c | 9 +++++----
+ 3 files changed, 26 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -548,31 +548,35 @@ static void ssb_select_mitigation()
+
+ #undef pr_fmt
+
+-static int ssb_prctl_set(unsigned long ctrl)
++static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+- bool rds = !!test_tsk_thread_flag(current, TIF_RDS);
++ bool rds = !!test_tsk_thread_flag(task, TIF_RDS);
+
+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
+ return -ENXIO;
+
+ if (ctrl == PR_SPEC_ENABLE)
+- clear_tsk_thread_flag(current, TIF_RDS);
++ clear_tsk_thread_flag(task, TIF_RDS);
+ else
+- set_tsk_thread_flag(current, TIF_RDS);
++ set_tsk_thread_flag(task, TIF_RDS);
+
+- if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
++ /*
++ * If being set on non-current task, delay setting the CPU
++ * mitigation until it is next scheduled.
++ */
++ if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS))
+ speculative_store_bypass_update();
+
+ return 0;
+ }
+
+-static int ssb_prctl_get(void)
++static int ssb_prctl_get(struct task_struct *task)
+ {
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_DISABLE;
+ case SPEC_STORE_BYPASS_PRCTL:
+- if (test_tsk_thread_flag(current, TIF_RDS))
++ if (test_tsk_thread_flag(task, TIF_RDS))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+ default:
+@@ -582,24 +586,25 @@ static int ssb_prctl_get(void)
+ }
+ }
+
+-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl)
+ {
+ if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
+ return -ERANGE;
+
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+- return ssb_prctl_set(ctrl);
++ return ssb_prctl_set(task, ctrl);
+ default:
+ return -ENODEV;
+ }
+ }
+
+-int arch_prctl_spec_ctrl_get(unsigned long which)
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+ {
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+- return ssb_prctl_get();
++ return ssb_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -6,6 +6,8 @@
+ #ifndef _LINUX_NOSPEC_H
+ #define _LINUX_NOSPEC_H
+
++struct task_struct;
++
+ /**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+ * @index: array element index
+@@ -77,7 +79,8 @@ static inline unsigned long array_index_
+ })
+
+ /* Speculation control prctl */
+-int arch_prctl_spec_ctrl_get(unsigned long which);
+-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl);
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl);
+
+ #endif /* _LINUX_NOSPEC_H */
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -2095,12 +2095,13 @@ static int propagate_has_child_subreaper
+ return 1;
+ }
+
+-int __weak arch_prctl_spec_ctrl_get(unsigned long which)
++int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
+ {
+ return -EINVAL;
+ }
+
+-int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
++int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
++ unsigned long ctrl)
+ {
+ return -EINVAL;
+ }
+@@ -2310,12 +2311,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
+ case PR_GET_SPECULATION_CTRL:
+ if (arg3 || arg4 || arg5)
+ return -EINVAL;
+- error = arch_prctl_spec_ctrl_get(arg2);
++ error = arch_prctl_spec_ctrl_get(me, arg2);
+ break;
+ case PR_SET_SPECULATION_CTRL:
+ if (arg4 || arg5)
+ return -EINVAL;
+- error = arch_prctl_spec_ctrl_set(arg2, arg3);
++ error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
+ break;
+ default:
+ error = -EINVAL;
diff --git a/patches.suse/18-proc-provide-details-on-speculation-flaw-mitigations.patch b/patches.suse/18-proc-provide-details-on-speculation-flaw-mitigations.patch
new file mode 100644
index 0000000000..018793747b
--- /dev/null
+++ b/patches.suse/18-proc-provide-details-on-speculation-flaw-mitigations.patch
@@ -0,0 +1,54 @@
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 1 May 2018 15:31:45 -0700
+Subject: proc: Provide details on speculation flaw mitigations
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+As done with seccomp and no_new_privs, also show speculation flaw
+mitigation state in /proc/$pid/status.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ fs/proc/array.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -84,6 +84,7 @@
+ #include <linux/delayacct.h>
+ #include <linux/seq_file.h>
+ #include <linux/pid_namespace.h>
++#include <linux/prctl.h>
+ #include <linux/ptrace.h>
+ #include <linux/tracehook.h>
+ #include <linux/string_helpers.h>
+@@ -351,6 +352,27 @@ static inline void task_seccomp(struct s
+ #ifdef CONFIG_SECCOMP
+ seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
+ #endif
++ seq_printf(m, "\nSpeculation Store Bypass:\t");
++ switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
++ case -EINVAL:
++ seq_printf(m, "unknown");
++ break;
++ case PR_SPEC_NOT_AFFECTED:
++ seq_printf(m, "not vulnerable");
++ break;
++ case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
++ seq_printf(m, "thread mitigated");
++ break;
++ case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
++ seq_printf(m, "thread vulnerable");
++ break;
++ case PR_SPEC_DISABLE:
++ seq_printf(m, "globally mitigated");
++ break;
++ default:
++ seq_printf(m, "vulnerable");
++ break;
++ }
+ seq_putc(m, '\n');
+ }
+
diff --git a/patches.suse/19-seccomp-enable-speculation-flaw-mitigations.patch b/patches.suse/19-seccomp-enable-speculation-flaw-mitigations.patch
new file mode 100644
index 0000000000..7bd9596240
--- /dev/null
+++ b/patches.suse/19-seccomp-enable-speculation-flaw-mitigations.patch
@@ -0,0 +1,57 @@
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 1 May 2018 15:07:31 -0700
+Subject: seccomp: Enable speculation flaw mitigations
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+When speculation flaw mitigations are opt-in (via prctl), using seccomp
+will automatically opt-in to these protections, since using seccomp
+indicates at least some level of sandboxing is desired.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ kernel/seccomp.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -17,6 +17,8 @@
+ #include <linux/audit.h>
+ #include <linux/compat.h>
+ #include <linux/coredump.h>
++#include <linux/nospec.h>
++#include <linux/prctl.h>
+ #include <linux/sched.h>
+ #include <linux/sched/task_stack.h>
+ #include <linux/seccomp.h>
+@@ -215,6 +217,19 @@ static inline bool seccomp_may_assign_mo
+ return true;
+ }
+
++/*
++ * If a given speculation mitigation is opt-in (prctl()-controlled),
++ * select it, by disabling speculation (enabling mitigation).
++ */
++static inline void spec_mitigate(struct task_struct *task,
++ unsigned long which)
++{
++ int state = arch_prctl_spec_ctrl_get(task, which);
++
++ if (state > 0 && (state & PR_SPEC_PRCTL))
++ arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE);
++}
++
+ static inline void seccomp_assign_mode(struct task_struct *task,
+ unsigned long seccomp_mode)
+ {
+@@ -226,6 +241,8 @@ static inline void seccomp_assign_mode(s
+ * filter) is set.
+ */
+ smp_mb__before_atomic();
++ /* Assume seccomp processes want speculation flaw mitigation. */
++ spec_mitigate(task, PR_SPEC_STORE_BYPASS);
+ set_tsk_thread_flag(task, TIF_SECCOMP);
+ }
+
diff --git a/patches.suse/20-x86-bugs-make-boot-modes-_ro_after_init.patch b/patches.suse/20-x86-bugs-make-boot-modes-_ro_after_init.patch
new file mode 100644
index 0000000000..76877843f3
--- /dev/null
+++ b/patches.suse/20-x86-bugs-make-boot-modes-_ro_after_init.patch
@@ -0,0 +1,36 @@
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 3 May 2018 15:03:30 -0700
+Subject: x86/bugs: Make boot modes __ro_after_init
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+There's no reason for these to be changed after boot.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -130,7 +130,8 @@ static const char *spectre_v2_strings[]
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V2 : " fmt
+
+-static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
++static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
++ SPECTRE_V2_NONE;
+
+ void x86_spec_ctrl_set(u64 val)
+ {
+@@ -425,7 +426,7 @@ retpoline_auto:
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
+
+-static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
++static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
+
+ /* The kernel command line selection */
+ enum ssb_mitigation_cmd {
diff --git a/patches.suse/21-prctl-add-force-disable-speculation.patch b/patches.suse/21-prctl-add-force-disable-speculation.patch
new file mode 100644
index 0000000000..4da48c396a
--- /dev/null
+++ b/patches.suse/21-prctl-add-force-disable-speculation.patch
@@ -0,0 +1,204 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 3 May 2018 22:09:15 +0200
+Subject: prctl: Add force disable speculation
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+For certain use cases it is desired to enforce mitigations so they cannot
+be undone afterwards. That's important for loader stubs which want to
+prevent a child from disabling the mitigation again. Will also be used for
+seccomp(). The extra state preserving of the prctl state for SSB is a
+preparatory step for EBPF dynamic speculation control.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/userspace-api/spec_ctrl.rst | 34 ++++++++++++++++++-----------
+ arch/x86/kernel/cpu/bugs.c | 35 +++++++++++++++++++++---------
+ fs/proc/array.c | 3 ++
+ include/linux/sched.h | 10 +++++++-
+ include/uapi/linux/prctl.h | 1
+ 5 files changed, 59 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -551,21 +551,37 @@ static void ssb_select_mitigation()
+
+ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+- bool rds = !!test_tsk_thread_flag(task, TIF_RDS);
++ bool update;
+
+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
+ return -ENXIO;
+
+- if (ctrl == PR_SPEC_ENABLE)
+- clear_tsk_thread_flag(task, TIF_RDS);
+- else
+- set_tsk_thread_flag(task, TIF_RDS);
++ switch (ctrl) {
++ case PR_SPEC_ENABLE:
++ /* If speculation is force disabled, enable is not allowed */
++ if (task_spec_ssb_force_disable(task))
++ return -EPERM;
++ task_clear_spec_ssb_disable(task);
++ update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
++ break;
++ case PR_SPEC_DISABLE:
++ task_set_spec_ssb_disable(task);
++ update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ break;
++ case PR_SPEC_FORCE_DISABLE:
++ task_set_spec_ssb_disable(task);
++ task_set_spec_ssb_force_disable(task);
++ update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ break;
++ default:
++ return -ERANGE;
++ }
+
+ /*
+ * If being set on non-current task, delay setting the CPU
+ * mitigation until it is next scheduled.
+ */
+- if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS))
++ if (task == current && update)
+ speculative_store_bypass_update();
+
+ return 0;
+@@ -577,7 +593,9 @@ static int ssb_prctl_get(struct task_str
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_DISABLE;
+ case SPEC_STORE_BYPASS_PRCTL:
+- if (test_tsk_thread_flag(task, TIF_RDS))
++ if (task_spec_ssb_force_disable(task))
++ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
++ if (task_spec_ssb_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+ default:
+@@ -590,9 +608,6 @@ static int ssb_prctl_get(struct task_str
+ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl)
+ {
+- if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
+- return -ERANGE;
+-
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssb_prctl_set(task, ctrl);
+--- a/Documentation/userspace-api/spec_ctrl.rst
++++ b/Documentation/userspace-api/spec_ctrl.rst
+@@ -25,19 +25,21 @@ PR_GET_SPECULATION_CTRL
+ -----------------------
+
+ PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+-which is selected with arg2 of prctl(2). The return value uses bits 0-2 with
++which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
+ the following meaning:
+
+-==== ================ ===================================================
+-Bit Define Description
+-==== ================ ===================================================
+-0 PR_SPEC_PRCTL Mitigation can be controlled per task by
+- PR_SET_SPECULATION_CTRL
+-1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
+- disabled
+-2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
+- enabled
+-==== ================ ===================================================
++==== ===================== ===================================================
++Bit Define Description
++==== ===================== ===================================================
++0 PR_SPEC_PRCTL Mitigation can be controlled per task by
++ PR_SET_SPECULATION_CTRL
++1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
++ disabled
++2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
++ enabled
++3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
++ subsequent prctl(..., PR_SPEC_ENABLE) will fail.
++==== ===================== ===================================================
+
+ If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+@@ -47,9 +49,11 @@ misfeature will fail.
+
+ PR_SET_SPECULATION_CTRL
+ -----------------------
++
+ PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
+ is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
+-in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE.
++in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
++PR_SPEC_FORCE_DISABLE.
+
+ Common error codes
+ ------------------
+@@ -70,10 +74,13 @@ Value Meaning
+ 0 Success
+
+ ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
+- PR_SPEC_DISABLE
++ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE
+
+ ENXIO Control of the selected speculation misfeature is not possible.
+ See PR_GET_SPECULATION_CTRL.
++
++EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
++ tried to enable it again.
+ ======= =================================================================
+
+ Speculation misfeature controls
+@@ -84,3 +91,4 @@ Speculation misfeature controls
+ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
++ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -360,6 +360,9 @@ static inline void task_seccomp(struct s
+ case PR_SPEC_NOT_AFFECTED:
+ seq_printf(m, "not vulnerable");
+ break;
++ case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
++ seq_printf(m, "thread force mitigated");
++ break;
+ case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
+ seq_printf(m, "thread mitigated");
+ break;
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1294,7 +1294,8 @@ extern struct pid *cad_pid;
+ #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
+ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
+ #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
+-
++#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
++#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
+
+ #define TASK_PFA_TEST(name, func) \
+ static inline bool task_##func(struct task_struct *p) \
+@@ -1319,6 +1320,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
+ TASK_PFA_SET(SPREAD_SLAB, spread_slab)
+ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+
++TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
++TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
++TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
++
++TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
++TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
++
+ static inline void
+ current_restore_flags(unsigned long orig_flags, unsigned long flags)
+ {
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -207,5 +207,6 @@ struct prctl_mm_map {
+ # define PR_SPEC_PRCTL (1UL << 0)
+ # define PR_SPEC_ENABLE (1UL << 1)
+ # define PR_SPEC_DISABLE (1UL << 2)
++# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+
+ #endif /* _LINUX_PRCTL_H */
diff --git a/patches.suse/22-seccomp-use-pr_spec_force_disable.patch b/patches.suse/22-seccomp-use-pr_spec_force_disable.patch
new file mode 100644
index 0000000000..1df62c20e7
--- /dev/null
+++ b/patches.suse/22-seccomp-use-pr_spec_force_disable.patch
@@ -0,0 +1,26 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 May 2018 09:40:03 +0200
+Subject: seccomp: Use PR_SPEC_FORCE_DISABLE
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Use PR_SPEC_FORCE_DISABLE in seccomp() because seccomp does not allow to
+widen restrictions.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ kernel/seccomp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -227,7 +227,7 @@ static inline void spec_mitigate(struct
+ int state = arch_prctl_spec_ctrl_get(task, which);
+
+ if (state > 0 && (state & PR_SPEC_PRCTL))
+- arch_prctl_spec_ctrl_set(task, which, PR_SPEC_DISABLE);
++ arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE);
+ }
+
+ static inline void seccomp_assign_mode(struct task_struct *task,
diff --git a/patches.suse/23-seccomp-add-filter-flag-to-opt-out-of-ssb-mitigation.patch b/patches.suse/23-seccomp-add-filter-flag-to-opt-out-of-ssb-mitigation.patch
new file mode 100644
index 0000000000..54764af2aa
--- /dev/null
+++ b/patches.suse/23-seccomp-add-filter-flag-to-opt-out-of-ssb-mitigation.patch
@@ -0,0 +1,129 @@
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 3 May 2018 14:56:12 -0700
+Subject: seccomp: Add filter flag to opt-out of SSB mitigation
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+If a seccomp user is not interested in Speculative Store Bypass mitigation
+by default, it can set the new SECCOMP_FILTER_FLAG_SPEC_ALLOW flag when
+adding filters.
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ include/linux/seccomp.h | 3 ++-
+ include/uapi/linux/seccomp.h | 3 ++-
+ kernel/seccomp.c | 19 +++++++++++--------
+ tools/testing/selftests/seccomp/seccomp_bpf.c | 6 +++++-
+ 4 files changed, 20 insertions(+), 11 deletions(-)
+
+--- a/include/linux/seccomp.h
++++ b/include/linux/seccomp.h
+@@ -3,7 +3,8 @@
+
+ #include <uapi/linux/seccomp.h>
+
+-#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC)
++#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
++ SECCOMP_FILTER_FLAG_SPEC_ALLOW)
+
+ #ifdef CONFIG_SECCOMP
+
+--- a/include/uapi/linux/seccomp.h
++++ b/include/uapi/linux/seccomp.h
+@@ -15,7 +15,8 @@
+ #define SECCOMP_SET_MODE_FILTER 1
+
+ /* Valid flags for SECCOMP_SET_MODE_FILTER */
+-#define SECCOMP_FILTER_FLAG_TSYNC 1
++#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
++#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+
+ /*
+ * All BPF programs must return a 32-bit value.
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -231,7 +231,8 @@ static inline void spec_mitigate(struct
+ }
+
+ static inline void seccomp_assign_mode(struct task_struct *task,
+- unsigned long seccomp_mode)
++ unsigned long seccomp_mode,
++ unsigned long flags)
+ {
+ assert_spin_locked(&task->sighand->siglock);
+
+@@ -241,8 +242,9 @@ static inline void seccomp_assign_mode(s
+ * filter) is set.
+ */
+ smp_mb__before_atomic();
+- /* Assume seccomp processes want speculation flaw mitigation. */
+- spec_mitigate(task, PR_SPEC_STORE_BYPASS);
++ /* Assume default seccomp processes want spec flaw mitigation. */
++ if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
++ spec_mitigate(task, PR_SPEC_STORE_BYPASS);
+ set_tsk_thread_flag(task, TIF_SECCOMP);
+ }
+
+@@ -310,7 +312,7 @@ static inline pid_t seccomp_can_sync_thr
+ * without dropping the locks.
+ *
+ */
+-static inline void seccomp_sync_threads(void)
++static inline void seccomp_sync_threads(unsigned long flags)
+ {
+ struct task_struct *thread, *caller;
+
+@@ -351,7 +353,8 @@ static inline void seccomp_sync_threads(
+ * allow one thread to transition the other.
+ */
+ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
+- seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
++ seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
++ flags);
+ }
+ }
+
+@@ -470,7 +473,7 @@ static long seccomp_attach_filter(unsign
+
+ /* Now that the new filter is in place, synchronize to all threads. */
+ if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+- seccomp_sync_threads();
++ seccomp_sync_threads(flags);
+
+ return 0;
+ }
+@@ -745,7 +748,7 @@ static long seccomp_set_mode_strict(void
+ #ifdef TIF_NOTSC
+ disable_TSC();
+ #endif
+- seccomp_assign_mode(current, seccomp_mode);
++ seccomp_assign_mode(current, seccomp_mode, 0);
+ ret = 0;
+
+ out:
+@@ -803,7 +806,7 @@ static long seccomp_set_mode_filter(unsi
+ /* Do not free the successfully attached filter. */
+ prepared = NULL;
+
+- seccomp_assign_mode(current, seccomp_mode);
++ seccomp_assign_mode(current, seccomp_mode, flags);
+ out:
+ spin_unlock_irq(&current->sighand->siglock);
+ if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -1684,7 +1684,11 @@ TEST_F_SIGNAL(TRACE_syscall, kill_after_
+ #endif
+
+ #ifndef SECCOMP_FILTER_FLAG_TSYNC
+-#define SECCOMP_FILTER_FLAG_TSYNC 1
++#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
++#endif
++
++#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
++#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+ #endif
+
+ #ifndef seccomp
diff --git a/patches.suse/24-seccomp-move-speculation-migitation-control-to-arch-code.patch b/patches.suse/24-seccomp-move-speculation-migitation-control-to-arch-code.patch
new file mode 100644
index 0000000000..06ecc88aaf
--- /dev/null
+++ b/patches.suse/24-seccomp-move-speculation-migitation-control-to-arch-code.patch
@@ -0,0 +1,109 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 May 2018 15:12:06 +0200
+Subject: seccomp: Move speculation migitation control to arch code
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+The migitation control is simpler to implement in architecture code as it
+avoids the extra function call to check the mode. Aside of that having an
+explicit seccomp enabled mode in the architecture mitigations would require
+even more workarounds.
+
+Move it into architecture code and provide a weak function in the seccomp
+code. Remove the 'which' argument as this allows the architecture to decide
+which mitigations are relevant for seccomp.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 29 ++++++++++++++++++-----------
+ include/linux/nospec.h | 2 ++
+ kernel/seccomp.c | 15 ++-------------
+ 3 files changed, 22 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -587,6 +587,24 @@ static int ssb_prctl_set(struct task_str
+ return 0;
+ }
+
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++ unsigned long ctrl)
++{
++ switch (which) {
++ case PR_SPEC_STORE_BYPASS:
++ return ssb_prctl_set(task, ctrl);
++ default:
++ return -ENODEV;
++ }
++}
++
++#ifdef CONFIG_SECCOMP
++void arch_seccomp_spec_mitigate(struct task_struct *task)
++{
++ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
++}
++#endif
++
+ static int ssb_prctl_get(struct task_struct *task)
+ {
+ switch (ssb_mode) {
+@@ -605,17 +623,6 @@ static int ssb_prctl_get(struct task_str
+ }
+ }
+
+-int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+- unsigned long ctrl)
+-{
+- switch (which) {
+- case PR_SPEC_STORE_BYPASS:
+- return ssb_prctl_set(task, ctrl);
+- default:
+- return -ENODEV;
+- }
+-}
+-
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+ {
+ switch (which) {
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -82,5 +82,7 @@ static inline unsigned long array_index_
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
+ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl);
++/* Speculation control for seccomp enforced mitigation */
++void arch_seccomp_spec_mitigate(struct task_struct *task);
+
+ #endif /* _LINUX_NOSPEC_H */
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -217,18 +217,7 @@ static inline bool seccomp_may_assign_mo
+ return true;
+ }
+
+-/*
+- * If a given speculation mitigation is opt-in (prctl()-controlled),
+- * select it, by disabling speculation (enabling mitigation).
+- */
+-static inline void spec_mitigate(struct task_struct *task,
+- unsigned long which)
+-{
+- int state = arch_prctl_spec_ctrl_get(task, which);
+-
+- if (state > 0 && (state & PR_SPEC_PRCTL))
+- arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE);
+-}
++void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
+
+ static inline void seccomp_assign_mode(struct task_struct *task,
+ unsigned long seccomp_mode,
+@@ -244,7 +233,7 @@ static inline void seccomp_assign_mode(s
+ smp_mb__before_atomic();
+ /* Assume default seccomp processes want spec flaw mitigation. */
+ if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
+- spec_mitigate(task, PR_SPEC_STORE_BYPASS);
++ arch_seccomp_spec_mitigate(task);
+ set_tsk_thread_flag(task, TIF_SECCOMP);
+ }
+
diff --git a/patches.suse/25-x86-speculation-make-seccomp-the-default-mode-for-speculative-store-bypass.patch b/patches.suse/25-x86-speculation-make-seccomp-the-default-mode-for-speculative-store-bypass.patch
new file mode 100644
index 0000000000..be755f63d9
--- /dev/null
+++ b/patches.suse/25-x86-speculation-make-seccomp-the-default-mode-for-speculative-store-bypass.patch
@@ -0,0 +1,154 @@
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 3 May 2018 14:37:54 -0700
+Subject: x86/speculation: Make "seccomp" the default mode for Speculative Store Bypass
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Unless explicitly opted out of, anything running under seccomp will have
+SSB mitigations enabled. Choosing the "prctl" mode will disable this.
+
+[ tglx: Adjusted it to the new arch_seccomp_spec_mitigate() mechanism ]
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 26 ++++++++++++-------
+ arch/x86/include/asm/nospec-branch.h | 1
+ arch/x86/kernel/cpu/bugs.c | 32 +++++++++++++++++-------
+ 3 files changed, 41 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -230,6 +230,7 @@ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
+ SPEC_STORE_BYPASS_PRCTL,
++ SPEC_STORE_BYPASS_SECCOMP,
+ };
+
+ extern char __indirect_thunk_start[];
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -434,22 +434,25 @@ enum ssb_mitigation_cmd {
+ SPEC_STORE_BYPASS_CMD_AUTO,
+ SPEC_STORE_BYPASS_CMD_ON,
+ SPEC_STORE_BYPASS_CMD_PRCTL,
++ SPEC_STORE_BYPASS_CMD_SECCOMP,
+ };
+
+ static const char *ssb_strings[] = {
+ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
+- [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl"
++ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
++ [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
+ };
+
+ static const struct {
+ const char *option;
+ enum ssb_mitigation_cmd cmd;
+ } ssb_mitigation_options[] = {
+- { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+- { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+- { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
+- { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
++ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
++ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
++ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
++ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
++ { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
+ };
+
+ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+@@ -499,8 +502,15 @@ static enum ssb_mitigation_cmd __init __
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
+- /* Choose prctl as the default mode */
+- mode = SPEC_STORE_BYPASS_PRCTL;
++ case SPEC_STORE_BYPASS_CMD_SECCOMP:
++ /*
++ * Choose prctl+seccomp as the default mode if seccomp is
++ * enabled.
++ */
++ if (IS_ENABLED(CONFIG_SECCOMP))
++ mode = SPEC_STORE_BYPASS_SECCOMP;
++ else
++ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+@@ -548,12 +558,14 @@ static void ssb_select_mitigation()
+ }
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "Speculation prctl: " fmt
+
+ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+ bool update;
+
+- if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
++ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
++ ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
+ return -ENXIO;
+
+ switch (ctrl) {
+@@ -601,7 +613,8 @@ int arch_prctl_spec_ctrl_set(struct task
+ #ifdef CONFIG_SECCOMP
+ void arch_seccomp_spec_mitigate(struct task_struct *task)
+ {
+- ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
++ if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
++ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ }
+ #endif
+
+@@ -610,6 +623,7 @@ static int ssb_prctl_get(struct task_str
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_DISABLE;
++ case SPEC_STORE_BYPASS_SECCOMP:
+ case SPEC_STORE_BYPASS_PRCTL:
+ if (task_spec_ssb_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3928,19 +3928,27 @@
+ This parameter controls whether the Speculative Store
+ Bypass optimization is used.
+
+- on - Unconditionally disable Speculative Store Bypass
+- off - Unconditionally enable Speculative Store Bypass
+- auto - Kernel detects whether the CPU model contains an
+- implementation of Speculative Store Bypass and
+- picks the most appropriate mitigation.
+- prctl - Control Speculative Store Bypass per thread
+- via prctl. Speculative Store Bypass is enabled
+- for a process by default. The state of the control
+- is inherited on fork.
++ on - Unconditionally disable Speculative Store Bypass
++ off - Unconditionally enable Speculative Store Bypass
++ auto - Kernel detects whether the CPU model contains an
++ implementation of Speculative Store Bypass and
++ picks the most appropriate mitigation. If the
++ CPU is not vulnerable, "off" is selected. If the
++ CPU is vulnerable the default mitigation is
++ architecture and Kconfig dependent. See below.
++ prctl - Control Speculative Store Bypass per thread
++ via prctl. Speculative Store Bypass is enabled
++ for a process by default. The state of the control
++ is inherited on fork.
++ seccomp - Same as "prctl" above, but all seccomp threads
++ will disable SSB unless they explicitly opt out.
+
+ Not specifying this option is equivalent to
+ spec_store_bypass_disable=auto.
+
++ Default mitigations:
++ X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
++
+ spia_io_base= [HW,MTD]
+ spia_fio_base=
+ spia_pedr=
diff --git a/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch b/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
new file mode 100644
index 0000000000..76c6e02392
--- /dev/null
+++ b/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
@@ -0,0 +1,377 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 9 May 2018 21:41:38 +0200
+Subject: x86/bugs: Rename _RDS to _SSBD
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+Intel collateral will reference the SSB mitigation bit in IA32_SPEC_CTL[2]
+as SSBD (Speculative Store Bypass Disable).
+
+Hence changing it.
+
+It is unclear yet what the MSR_IA32_ARCH_CAPABILITIES (0x10a) Bit(4) name
+is going to be. Following the rename it would be SSBD_NO but that rolls out
+to Speculative Store Bypass Disable No.
+
+Also fixed the missing space in X86_FEATURE_AMD_SSBD.
+
+[ tglx: Fixup x86_amd_rds_enable() and rds_tif_to_amd_ls_cfg() as well ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/cpufeatures.h | 4 ++--
+ arch/x86/include/asm/msr-index.h | 10 +++++-----
+ arch/x86/include/asm/spec-ctrl.h | 12 ++++++------
+ arch/x86/include/asm/thread_info.h | 6 +++---
+ arch/x86/kernel/cpu/amd.c | 14 +++++++-------
+ arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++------------------
+ arch/x86/kernel/cpu/common.c | 2 +-
+ arch/x86/kernel/cpu/intel.c | 2 +-
+ arch/x86/kernel/process.c | 8 ++++----
+ arch/x86/kvm/cpuid.c | 2 +-
+ arch/x86/kvm/vmx.c | 6 +++---
+ 11 files changed, 51 insertions(+), 51 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -216,7 +216,7 @@
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_USE_IBRS ( 7*32+23) /* "" Use IBRS for Spectre v2 safety */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+24) /* "" Disable Speculative Store Bypass. */
+-#define X86_FEATURE_AMD_RDS (7*32+25) /* "" AMD RDS implementation */
++#define X86_FEATURE_AMD_SSBD (7*32+25) /* "" AMD SSBD implementation */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -333,7 +333,7 @@
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+-#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */
++#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */
+
+ /*
+ * BUG word(s)
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -41,8 +41,8 @@
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+-#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
+-#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
++#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
++#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+@@ -69,10 +69,10 @@
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+ #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+-#define ARCH_CAP_RDS_NO (1 << 4) /*
++#define ARCH_CAP_SSBD_NO (1 << 4) /*
+ * Not susceptible to Speculative Store Bypass
+- * attack, so no Reduced Data Speculation control
+- * required.
++ * attack, so no Speculative Store Bypass
++ * control required.
+ */
+
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+--- a/arch/x86/include/asm/spec-ctrl.h
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -17,20 +17,20 @@ extern void x86_spec_ctrl_restore_host(u
+
+ /* AMD specific Speculative Store Bypass MSR data */
+ extern u64 x86_amd_ls_cfg_base;
+-extern u64 x86_amd_ls_cfg_rds_mask;
++extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+ /* The Intel SPEC CTRL MSR base value cache */
+ extern u64 x86_spec_ctrl_base;
+
+-static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
++static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+ {
+- BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
+- return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
++ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
++ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+ }
+
+-static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
++static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+ {
+- return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
++ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+ }
+
+ extern void speculative_store_bypass_update(void);
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -80,7 +80,7 @@ struct thread_info {
+ #define TIF_SIGPENDING 2 /* signal pending */
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+-#define TIF_RDS 5 /* Reduced data speculation */
++#define TIF_SSBD 5 /* Reduced data speculation */
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
+@@ -106,7 +106,7 @@ struct thread_info {
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+-#define _TIF_RDS (1 << TIF_RDS)
++#define _TIF_SSBD (1 << TIF_SSBD)
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+@@ -144,7 +144,7 @@ struct thread_info {
+
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW \
+- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
++ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -566,12 +566,12 @@ static void bsp_init_amd(struct cpuinfo_
+ }
+ /*
+ * Try to cache the base value so further operations can
+- * avoid RMW. If that faults, do not enable RDS.
++ * avoid RMW. If that faults, do not enable SSBD.
+ */
+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+- setup_force_cpu_cap(X86_FEATURE_RDS);
+- setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
+- x86_amd_ls_cfg_rds_mask = 1ULL << bit;
++ setup_force_cpu_cap(X86_FEATURE_SSBD);
++ setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
++ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+ }
+ }
+ }
+@@ -908,9 +908,9 @@ static void init_amd(struct cpuinfo_x86
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+- if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
+- set_cpu_cap(c, X86_FEATURE_RDS);
+- set_cpu_cap(c, X86_FEATURE_AMD_RDS);
++ if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ set_cpu_cap(c, X86_FEATURE_SSBD);
++ set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
+ }
+ }
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -44,10 +44,10 @@ static u64 __ro_after_init x86_spec_ctrl
+
+ /*
+ * AMD specific MSR info for Speculative Store Bypass control.
+- * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
++ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
+ */
+ u64 __ro_after_init x86_amd_ls_cfg_base;
+-u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
++u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
+
+ void __init check_bugs(void)
+ {
+@@ -147,7 +147,7 @@ u64 x86_spec_ctrl_get_default(void)
+ u64 msrval = x86_spec_ctrl_base;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+ return msrval;
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
+@@ -160,7 +160,7 @@ void x86_spec_ctrl_set_guest(u64 guest_s
+ return;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+@@ -175,18 +175,18 @@ void x86_spec_ctrl_restore_host(u64 gues
+ return;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, host);
+ }
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
+-static void x86_amd_rds_enable(void)
++static void x86_amd_ssb_disable(void)
+ {
+- u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
++ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+- if (boot_cpu_has(X86_FEATURE_AMD_RDS))
++ if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
+@@ -491,7 +491,7 @@ static enum ssb_mitigation_cmd __init __
+ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+ enum ssb_mitigation_cmd cmd;
+
+- if (!boot_cpu_has(X86_FEATURE_RDS))
++ if (!boot_cpu_has(X86_FEATURE_SSBD))
+ return mode;
+
+ cmd = ssb_parse_cmdline();
+@@ -525,7 +525,7 @@ static enum ssb_mitigation_cmd __init __
+ /*
+ * We have three CPU feature flags that are in play here:
+ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+- * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
++ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+ */
+ if (mode == SPEC_STORE_BYPASS_DISABLE) {
+@@ -536,12 +536,12 @@ static enum ssb_mitigation_cmd __init __
+ */
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+- x86_spec_ctrl_base |= SPEC_CTRL_RDS;
+- x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
+- x86_spec_ctrl_set(SPEC_CTRL_RDS);
++ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
++ x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
++ x86_spec_ctrl_set(SPEC_CTRL_SSBD);
+ break;
+ case X86_VENDOR_AMD:
+- x86_amd_rds_enable();
++ x86_amd_ssb_disable();
+ break;
+ }
+ }
+@@ -574,16 +574,16 @@ static int ssb_prctl_set(struct task_str
+ if (task_spec_ssb_force_disable(task))
+ return -EPERM;
+ task_clear_spec_ssb_disable(task);
+- update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
++ update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ case PR_SPEC_DISABLE:
+ task_set_spec_ssb_disable(task);
+- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ task_set_spec_ssb_disable(task);
+ task_set_spec_ssb_force_disable(task);
+- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
++ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ default:
+ return -ERANGE;
+@@ -653,7 +653,7 @@ void x86_spec_ctrl_setup_ap(void)
+ x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
+
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+- x86_amd_rds_enable();
++ x86_amd_ssb_disable();
+ }
+
+ #ifdef CONFIG_SYSFS
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -926,7 +926,7 @@ static void __init cpu_set_bug_bits(stru
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+- !(ia32_cap & ARCH_CAP_RDS_NO))
++ !(ia32_cap & ARCH_CAP_SSBD_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_speculation))
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -188,7 +188,7 @@ static void early_init_intel(struct cpui
+ setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+- setup_clear_cpu_cap(X86_FEATURE_RDS);
++ setup_clear_cpu_cap(X86_FEATURE_SSBD);
+ }
+
+ /*
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -283,11 +283,11 @@ static __always_inline void __speculativ
+ {
+ u64 msr;
+
+- if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
+- msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
++ if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++ msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+ } else {
+- msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
++ msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ }
+ }
+@@ -329,7 +329,7 @@ void __switch_to_xtra(struct task_struct
+ if ((tifp ^ tifn) & _TIF_NOCPUID)
+ set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+- if ((tifp ^ tifn) & _TIF_RDS)
++ if ((tifp ^ tifn) & _TIF_SSBD)
+ __speculative_store_bypass_update(tifn);
+ }
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -394,7 +394,7 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(RDS) |
++ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
+ F(ARCH_CAPABILITIES);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3298,7 +3298,7 @@ static int vmx_get_msr(struct kvm_vcpu *
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+- !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
++ !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
+ return 1;
+
+ msr_info->data = to_vmx(vcpu)->spec_ctrl;
+@@ -3420,11 +3420,11 @@ static int vmx_set_msr(struct kvm_vcpu *
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+- !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
++ !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
++ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+ return 1;
+
+ vmx->spec_ctrl = data;
diff --git a/patches.suse/27-proc-use-underscores-for-ssbd-in-status.patch b/patches.suse/27-proc-use-underscores-for-ssbd-in-status.patch
new file mode 100644
index 0000000000..895f9de271
--- /dev/null
+++ b/patches.suse/27-proc-use-underscores-for-ssbd-in-status.patch
@@ -0,0 +1,27 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 9 May 2018 21:41:38 +0200
+Subject: proc: Use underscores for SSBD in 'status'
+Patch-mainline: not yet, queued in subsystem tree
+References: bsc#1087082 CVE-2018-3639
+
+The style for the 'status' file is CamelCase or this. _.
+
+Fixes: fae1fa0fc ("proc: Provide details on speculation flaw mitigations")
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ fs/proc/array.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -352,7 +352,7 @@ static inline void task_seccomp(struct s
+ #ifdef CONFIG_SECCOMP
+ seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
+ #endif
+- seq_printf(m, "\nSpeculation Store Bypass:\t");
++ seq_printf(m, "\nSpeculation_Store_Bypass:\t");
+ switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
+ case -EINVAL:
+ seq_printf(m, "unknown");
diff --git a/patches.suse/bpf-prevent-memory-disambiguation-attack.patch b/patches.suse/bpf-prevent-memory-disambiguation-attack.patch
new file mode 100644
index 0000000000..a1b940b52b
--- /dev/null
+++ b/patches.suse/bpf-prevent-memory-disambiguation-attack.patch
@@ -0,0 +1,134 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Tue, 15 May 2018 09:27:05 -0700
+Subject: [PATCH] bpf: prevent memory disambiguation attack
+References: bsc#1087082 CVE-2018-3639
+Patch-mainline: Not yet, under development
+
+Detect code patterns where malicious 'speculative store bypass' can be used
+and sanitize such patterns.
+
+ 39: (bf) r3 = r10
+ 40: (07) r3 += -216
+ 41: (79) r8 = *(u64 *)(r7 +0) // slow read
+ 42: (7a) *(u64 *)(r10 -72) = 0 // verifier inserts this instruction
+ 43: (7b) *(u64 *)(r8 +0) = r3 // this store becomes slow due to r8
+ 44: (79) r1 = *(u64 *)(r6 +0) // cpu speculatively executes this load
+ 45: (71) r2 = *(u8 *)(r1 +0) // speculatively arbitrary 'load byte'
+ // is now sanitized
+
+Above code after x86 JIT becomes:
+ e5: mov %rbp,%rdx
+ e8: add $0xffffffffffffff28,%rdx
+ ef: mov 0x0(%r13),%r14
+ f3: movq $0x0,-0x48(%rbp)
+ fb: mov %rdx,0x0(%r14)
+ ff: mov 0x0(%rbx),%rdi
+103: movzbq 0x0(%rdi),%rsi
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ include/linux/bpf_verifier.h | 1
+ kernel/bpf/verifier.c | 59 ++++++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 57 insertions(+), 3 deletions(-)
+
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -115,6 +115,7 @@ struct bpf_insn_aux_data {
+ struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
+ };
+ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
++ int sanitize_stack_off; /* stack slot to be cleared */
+ bool seen; /* this insn was processed by the verifier */
+ };
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -658,7 +658,7 @@ static bool is_spillable_regtype(enum bp
+ */
+ static int check_stack_write(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *state, int off,
+- int size, int value_regno)
++ int size, int value_regno, int insn_idx)
+ {
+ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+
+@@ -689,8 +689,33 @@ static int check_stack_write(struct bpf_
+ state->stack[spi].spilled_ptr = state->regs[value_regno];
+ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+- for (i = 0; i < BPF_REG_SIZE; i++)
++ for (i = 0; i < BPF_REG_SIZE; i++) {
++ if (state->stack[spi].slot_type[i] == STACK_MISC &&
++ !env->allow_ptr_leaks) {
++ int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
++ int soff = (-spi - 1) * BPF_REG_SIZE;
++
++ /* detected reuse of integer stack slot with a pointer
++ * which means either llvm is reusing stack slot or
++ * an attacker is trying to exploit CVE-2018-3639
++ * (speculative store bypass)
++ * Have to sanitize that slot with preemptive
++ * store of zero.
++ */
++ if (*poff && *poff != soff) {
++ /* disallow programs where single insn stores
++ * into two different stack slots, since verifier
++ * cannot sanitize them
++ */
++ verbose(env,
++ "insn %d cannot access two stack slots fp%d and fp%d",
++ insn_idx, *poff, soff);
++ return -EINVAL;
++ }
++ *poff = soff;
++ }
+ state->stack[spi].slot_type[i] = STACK_SPILL;
++ }
+ } else {
+ /* regular write of data into stack */
+ state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
+@@ -1181,7 +1206,7 @@ static int check_mem_access(struct bpf_v
+
+ if (t == BPF_WRITE)
+ err = check_stack_write(env, state, off, size,
+- value_regno);
++ value_regno, insn_idx);
+ else
+ err = check_stack_read(env, state, off, size,
+ value_regno);
+@@ -4232,6 +4257,34 @@ static int convert_ctx_accesses(struct b
+ else
+ continue;
+
++ if (type == BPF_WRITE &&
++ env->insn_aux_data[i + delta].sanitize_stack_off) {
++ struct bpf_insn patch[] = {
++ /* Sanitize suspicious stack slot with zero.
++ * There are no memory dependencies for this store,
++ * since it's only using frame pointer and immediate
++ * constant of zero
++ */
++ BPF_ST_MEM(BPF_DW, BPF_REG_FP,
++ env->insn_aux_data[i + delta].sanitize_stack_off,
++ 0),
++ /* the original STX instruction will immediately
++ * overwrite the same stack slot with appropriate value
++ */
++ *insn,
++ };
++
++ cnt = ARRAY_SIZE(patch);
++ new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
++ if (!new_prog)
++ return -ENOMEM;
++
++ delta += cnt - 1;
++ env->prog = new_prog;
++ insn = new_prog->insnsi + i + delta;
++ continue;
++ }
++
+ if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
+ continue;
+
diff --git a/series.conf b/series.conf
index ba3bb1d47a..05e1df3d61 100644
--- a/series.conf
+++ b/series.conf
@@ -12050,6 +12050,7 @@
patches.arch/powerpc-Use-barrier_nospec-in-copy_from_user.patch
patches.arch/powerpc-64-Use-barrier_nospec-in-syscall-entry.patch
patches.suse/powerpc-64s-Enhance-the-information-in-cpu_show_spec.patch
+ patches.arch/powerpc-64s-Add-support-for-a-store-forwarding-barri.patch
patches.arch/KVM-PPC-Book3S-HV-Disable-tb_offset.patch
########################################################
@@ -12438,6 +12439,38 @@
patches.kabi/musb-flush_irq_work-kabi-fix.patch
patches.kabi/0003-md-fix-md_write_start-deadlock-w-o-metadata-devices.kabi
+ # SSB
+ patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch
+ patches.suse/02-x86-bugs-concentrate-bug-detection-into-a-separate-function.patch
+ patches.suse/03-x86-bugs-concentrate-bug-reporting-into-a-separate-function.patch
+ patches.suse/04-x86-bugs-read-spec_ctrl-msr-during-boot-and-re-use-reserved-bits.patch
+ patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch
+ patches.suse/06-x86-bugs-expose-sys-spec_store_bypass.patch
+ patches.suse/07-x86-cpufeatures-add-x86_feature_rds.patch
+ patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch
+ patches.suse/09-x86-bugs-intel-set-proper-cpu-features-and-setup-rds.patch
+ patches.suse/10-x86-bugs-whitelist-allowed-spec_ctrl-msr-values.patch
+ patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch
+ patches.suse/12-x86-kvm-vmx-expose-spec_ctrl-bit2-to-the-guest.patch
+ patches.suse/13-x86-speculation-create-spec-ctrl-h-to-avoid-include-hell.patch
+ patches.suse/14-prctl-add-speculation-control-prctls.patch
+ patches.suse/15-x86-process-allow-runtime-control-of-speculative-store-bypass.patch
+ patches.suse/16-x86-speculation-add-prctl-for-speculative-store-bypass-mitigation.patch
+ patches.suse/17-nospec-allow-getting-setting-on-non-current-task.patch
+ patches.suse/18-proc-provide-details-on-speculation-flaw-mitigations.patch
+ patches.suse/19-seccomp-enable-speculation-flaw-mitigations.patch
+ patches.suse/20-x86-bugs-make-boot-modes-_ro_after_init.patch
+ patches.suse/21-prctl-add-force-disable-speculation.patch
+ patches.suse/22-seccomp-use-pr_spec_force_disable.patch
+ patches.suse/23-seccomp-add-filter-flag-to-opt-out-of-ssb-mitigation.patch
+ patches.suse/24-seccomp-move-speculation-migitation-control-to-arch-code.patch
+ patches.suse/25-x86-speculation-make-seccomp-the-default-mode-for-speculative-store-bypass.patch
+ patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
+ patches.suse/27-proc-use-underscores-for-ssbd-in-status.patch
+ patches.arch/KVM--SVM--Move-spec-control-call-after-restore-of-GS.patch
+ patches.suse/bpf-prevent-memory-disambiguation-attack.patch
+ patches.kabi/bpf-prevent-memory-disambiguation-attack.patch
+
########################################################
# You'd better have a good reason for adding a patch
# below here.