author     Takashi Iwai <tiwai@suse.de>   2018-05-23 08:57:07 +0200
committer  Takashi Iwai <tiwai@suse.de>   2018-05-23 08:57:07 +0200
commit     036cd2ff68072c920f8fec63c2c29c727eca94d9 (patch)
tree       c544bd2a4c61f3b31e36e86b0d0060c8d694ec66
parent     51b34053770bf1bc30ecb16a86376860d6fe7463 (diff)
parent     e260091ada1c7a5fa550481e001c4a10d9fe7eed (diff)
Merge branch 'SLE12-SP3' into openSUSE-42.3 (tag rpm-4.4.132-53)
-rw-r--r--  patches.suse/0004-x86-bugs-centralize-SPEC_CTRL-MSR-mask-generation-FIX-SLE12.patch |  25
-rw-r--r--  patches.suse/01-bpf-add-enter_exit-markers.patch |  53
-rw-r--r--  patches.suse/02-bpf-track-entry-to-and-exit-from-BFP-code.patch | 117
-rw-r--r--  patches.suse/03-bpf-use-reduced-speculation-mitigations.patch | 101
-rw-r--r--  patches.suse/04-bpf-x86-bugs-centralize-SPEC_CTRL-MSR-mask-generation.patch | 102
-rw-r--r--  patches.suse/05-bpf-x86-implement-reduced-speculation-when-running-BPF.patch | 184
-rw-r--r--  patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch |  49
-rw-r--r--  patches.suse/bpf-prevent-memory-disambiguation-attack.patch | 208
-rw-r--r--  series.conf |  10
9 files changed, 234 insertions(+), 615 deletions(-)
diff --git a/patches.suse/0004-x86-bugs-centralize-SPEC_CTRL-MSR-mask-generation-FIX-SLE12.patch b/patches.suse/0004-x86-bugs-centralize-SPEC_CTRL-MSR-mask-generation-FIX-SLE12.patch
deleted file mode 100644
index fc5a2c34a6..0000000000
--- a/patches.suse/0004-x86-bugs-centralize-SPEC_CTRL-MSR-mask-generation-FIX-SLE12.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From: Jiri Kosina <jkosina@suse.cz>
-Subject: [PATCH] x86/bugs: make intel_rds_mask() honor X86_FEATURE_SSBD
-Patch-mainline: Not yet, under development
-References: bsc#1094019
-
-Don't return SSBD mask from x86_calculate_kernel_spec_ctrl() if feature
-bit is not set.
-
-Signed-off-by: Jiri Kosina <jkosina@suse.cz>
----
- arch/x86/kernel/cpu/bugs.c | 3 +++
- 1 file changed, 3 insertions(+)
-
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -158,6 +158,9 @@ static inline u64 intel_rds_mask(void)
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- return 0;
-
-+ if (!boot_cpu_has(X86_FEATURE_SSBD))
-+ return 0;
-+
- mask = ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
-
- /*
diff --git a/patches.suse/01-bpf-add-enter_exit-markers.patch b/patches.suse/01-bpf-add-enter_exit-markers.patch
deleted file mode 100644
index cfb2bc7609..0000000000
--- a/patches.suse/01-bpf-add-enter_exit-markers.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From: Dave Hansen <dave.hansen@linux.intel.com>
-Subject: bpf: add enter/exit markers
-Patch-mainline: Not yet, work in progress
-References: bsc#1087082 CVE-2018-3639
-
-BPF code is often supplied from outside the kernel. While it can
-be programmatically verified, it is very difficult to verify
-potential effects from speculative execution.
-
-This patch adds some marker functions as BPF code is entered or
-exited. These serve only as stubs for now.
-
-There are many possibilities for optimization. The BPF programs
-that run on devices, for instance, are less likely to need any
-CPU-based mitigations. These patches are an entirely unoptimized
-first pass.
-
-Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Signed-off-by: Jiri Kosina <jkosina@suse.cz>
----
- include/linux/filter.h | 18 +++++++++++++++++-
- 1 file changed, 17 insertions(+), 1 deletion(-)
-
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -348,8 +348,24 @@ struct sk_filter {
- struct bpf_prog *prog;
- };
-
--#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
-+static inline void bpf_enter_prog(const struct bpf_prog *fp)
-+{
-+}
-+
-+static inline void bpf_leave_prog(const struct bpf_prog *fp)
-+{
-+}
-
-+#define BPF_PROG_RUN(filter, ctx) ({ \
-+ int __ret; \
-+ \
-+ bpf_enter_prog(filter); \
-+ __ret = (*(filter)->bpf_func)(ctx, (filter)->insnsi); \
-+ bpf_leave_prog(filter); \
-+ \
-+ __ret; \
-+})
-+
- static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
- struct sk_buff *skb)
- {
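The BPF_PROG_RUN rewrite above leans on a GNU C statement expression so the enter/leave markers can bracket the call while the macro still yields the program's return value. A minimal user-space sketch of the same idiom, assuming hypothetical names (prog_enter, prog_leave, demo_prog) in place of the kernel hooks; it builds with GCC or Clang:

    #include <stdio.h>

    static int entered, left;

    static void prog_enter(void) { entered++; }   /* stands in for bpf_enter_prog() */
    static void prog_leave(void) { left++; }      /* stands in for bpf_leave_prog() */

    /* Statement expression: run fn(ctx) between the markers, yield its value. */
    #define PROG_RUN(fn, ctx) ({            \
        int __ret;                          \
        prog_enter();                       \
        __ret = (fn)(ctx);                  \
        prog_leave();                       \
        __ret;                              \
    })

    static int demo_prog(int ctx) { return ctx * 2; }

    int main(void)
    {
        int ret = PROG_RUN(demo_prog, 21);
        printf("ret=%d entered=%d left=%d\n", ret, entered, left);
        return 0;
    }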
diff --git a/patches.suse/02-bpf-track-entry-to-and-exit-from-BFP-code.patch b/patches.suse/02-bpf-track-entry-to-and-exit-from-BFP-code.patch
deleted file mode 100644
index 300f1ac8de..0000000000
--- a/patches.suse/02-bpf-track-entry-to-and-exit-from-BFP-code.patch
+++ /dev/null
@@ -1,117 +0,0 @@
-From: Dave Hansen <dave.hansen@linux.intel.com>
-Subject: bpf: track entry to and exit from BFP code
-Patch-mainline: Not yet, work in progress
-References: bsc#1087082 CVE-2018-3639
-
-Now that we have hooks called when we enter/exit the BPF code, track
-when we enter/leave. We "leave" lazily. The first time we leave, we
-schedule some work to do the actual "leave" at some point in the future.
-This way, we do not thrash by enabling and disabling mitigations
-frequently.
-
-This means that the per-BPF-program overhead is hopefully just the
-cost of incrementing and decrementing a per-cpu variable.
-
-The per-cpu counter 'bpf_prog_active' looks superficially like a great
-mechanism to use. However, it does not track active BPF programs.
-It appears to just be active when eprobe BPF handlers are running.
-
-Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Signed-off-by: Jiri Kosina <jkosina@suse.cz>
----
- include/linux/filter.h | 11 +++++++++
- net/core/filter.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 69 insertions(+)
-
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -348,12 +348,23 @@ struct sk_filter {
- struct bpf_prog *prog;
- };
-
-+DECLARE_PER_CPU(unsigned int, bpf_prog_ran);
-+
- static inline void bpf_enter_prog(const struct bpf_prog *fp)
- {
-+ int *count = &get_cpu_var(bpf_prog_ran);
-+ (*count)++;
- }
-
-+extern void bpf_leave_prog_deferred(const struct bpf_prog *fp);
- static inline void bpf_leave_prog(const struct bpf_prog *fp)
- {
-+ int *count = this_cpu_ptr(&bpf_prog_ran);
-+ if (*count == 1)
-+ bpf_leave_prog_deferred(fp);
-+ else
-+ (*count)--;
-+ put_cpu_var(bpf_prog_ran);
- }
-
- #define BPF_PROG_RUN(filter, ctx) ({ \
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -1999,3 +1999,61 @@ out:
- release_sock(sk);
- return ret;
- }
-+
-+/*
-+ * 0 when no BPF code has executed on the CPU.
-+ * Incremented when running BPF code.
-+ * When ==1, work will be scheduled.
-+ * When >1, work will not be scheduled because work is already
-+ * scheduled.
-+ * When work is performed, count will be decremented from 1->0.
-+ */
-+DEFINE_PER_CPU(unsigned int, bpf_prog_ran);
-+EXPORT_SYMBOL_GPL(bpf_prog_ran);
-+static void bpf_done_on_this_cpu(struct work_struct *work)
-+{
-+ if (!this_cpu_dec_return(bpf_prog_ran))
-+ return;
-+
-+ /*
-+ * This is unexpected. The elevated refcount indicates
-+ * being in the *middle* of a BPF program, which should
-+ * be impossible. They are executed inside
-+ * rcu_read_lock() where we can not sleep and where
-+ * preemption is disabled.
-+ */
-+ WARN_ON_ONCE(1);
-+}
-+
-+DEFINE_PER_CPU(struct delayed_work, bpf_prog_delayed_work);
-+static __init int bpf_init_delayed_work(void)
-+{
-+ int i;
-+
-+ for_each_possible_cpu(i) {
-+ struct delayed_work *w = &per_cpu(bpf_prog_delayed_work, i);
-+
-+ INIT_DELAYED_WORK(w, bpf_done_on_this_cpu);
-+ }
-+ return 0;
-+}
-+subsys_initcall(bpf_init_delayed_work);
-+
-+/*
-+ * Must be called with preempt disabled
-+ *
-+ * The schedule_delayed_work_on() is relatively expensive. So,
-+ * this way, someone doing a bunch of repeated BPF calls will
-+ * only pay the cost of scheduling work on the *first* BPF call.
-+ * The subsequent calls only pay the cost of incrementing a
-+ * per-cpu variable, which is cheap.
-+ */
-+void bpf_leave_prog_deferred(const struct bpf_prog *fp)
-+{
-+ int cpu = smp_processor_id();
-+ struct delayed_work *w = &per_cpu(bpf_prog_delayed_work, cpu);
-+ unsigned long delay_jiffies = msecs_to_jiffies(10);
-+
-+ schedule_delayed_work_on(cpu, w, delay_jiffies);
-+}
-+EXPORT_SYMBOL_GPL(bpf_leave_prog_deferred);
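The enter/leave pair above is deliberately asymmetric: every entry increments the per-cpu counter, but the first leave only queues delayed work, so the counter stays at 1 across back-to-back programs and mitigations are not toggled per run. A single-CPU user-space sketch of that pattern, where schedule_cleanup() and cleanup() are hypothetical stand-ins for schedule_delayed_work_on() and bpf_done_on_this_cpu():

    #include <stdio.h>
    #include <stdbool.h>

    static unsigned int prog_ran;     /* models the per-cpu bpf_prog_ran counter */
    static bool cleanup_pending;      /* models the queued delayed work */

    static void schedule_cleanup(void) { cleanup_pending = true; }

    static void prog_enter(void) { prog_ran++; }

    static void prog_leave(void)
    {
        if (prog_ran == 1)
            schedule_cleanup();       /* defer the final 1 -> 0 transition */
        else
            prog_ran--;
    }

    static void cleanup(void)         /* fires later, after the 10ms-style delay */
    {
        cleanup_pending = false;
        if (--prog_ran != 0)
            fprintf(stderr, "unexpected: still inside a program\n");
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) { /* repeated back-to-back runs */
            prog_enter();
            prog_leave();             /* only the first run schedules cleanup */
        }
        printf("after runs: count=%u pending=%d\n", prog_ran, cleanup_pending);
        cleanup();                    /* the deferred work finally drops 1 -> 0 */
        printf("after cleanup: count=%u\n", prog_ran);
        return 0;
    }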
diff --git a/patches.suse/03-bpf-use-reduced-speculation-mitigations.patch b/patches.suse/03-bpf-use-reduced-speculation-mitigations.patch
deleted file mode 100644
index f92c4a8127..0000000000
--- a/patches.suse/03-bpf-use-reduced-speculation-mitigations.patch
+++ /dev/null
@@ -1,101 +0,0 @@
-From: Dave Hansen <dave.hansen@linux.intel.com>
-Subject: bpf: use reduced speculation mitigations
-Patch-mainline: Not yet, work in progress
-References: bsc#1087082 CVE-2018-3639
-
-The previous patches put in place the infrastructure to tell when
-BPF code is running. Now, we hook into that code to call out to
-some architecture-specific code which will implement those
-mitigations.
-
-Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Signed-off-by: Jiri Kosina <jkosina@suse.cz>
----
- include/linux/filter.h | 7 +++++++
- include/linux/nospec.h | 9 +++++++++
- net/core/filter.c | 23 +++++++++++++++--------
- 3 files changed, 31 insertions(+), 8 deletions(-)
-
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -13,6 +13,7 @@
- #include <linux/printk.h>
- #include <linux/workqueue.h>
- #include <linux/sched.h>
-+#include <linux/nospec.h>
- #include <net/sch_generic.h>
-
- #include <asm/cacheflush.h>
-@@ -354,6 +355,12 @@ static inline void bpf_enter_prog(const
- {
- int *count = &get_cpu_var(bpf_prog_ran);
- (*count)++;
-+ /*
-+ * Upon the first entry to BPF code, we need to reduce
-+ * memory speculation to mitigate attacks targeting it.
-+ */
-+ if (*count == 1)
-+ cpu_enter_reduced_memory_speculation();
- }
-
- extern void bpf_leave_prog_deferred(const struct bpf_prog *fp);
---- a/include/linux/nospec.h
-+++ b/include/linux/nospec.h
-@@ -65,4 +65,13 @@ int arch_prctl_spec_ctrl_set(struct task
- /* Speculation control for seccomp enforced mitigation */
- void arch_seccomp_spec_mitigate(struct task_struct *task);
-
-+#ifndef CONFIG_ARCH_HAS_REDUCED_MEMORY_SPECULATION
-+static inline void cpu_enter_reduced_memory_speculation(void)
-+{
-+}
-+static inline void cpu_leave_reduced_memory_speculation(void)
-+{
-+}
-+#endif
-+
- #endif /* _LINUX_NOSPEC_H */
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -31,6 +31,7 @@
- #include <linux/netdevice.h>
- #include <linux/if_packet.h>
- #include <linux/gfp.h>
-+#include <linux/nospec.h>
- #include <net/ip.h>
- #include <net/protocol.h>
- #include <net/netlink.h>
-@@ -2012,17 +2013,23 @@ DEFINE_PER_CPU(unsigned int, bpf_prog_ra
- EXPORT_SYMBOL_GPL(bpf_prog_ran);
- static void bpf_done_on_this_cpu(struct work_struct *work)
- {
-- if (!this_cpu_dec_return(bpf_prog_ran))
-- return;
-+ if (this_cpu_dec_return(bpf_prog_ran)) {
-+ /*
-+ * This is unexpected. The elevated refcount indicates
-+ * being in the *middle* of a BPF program, which should
-+ * be impossible. They are executed inside
-+ * rcu_read_lock() where we can not sleep and where
-+ * preemption is disabled.
-+ */
-+ WARN_ON_ONCE(1);
-+ }
-
- /*
-- * This is unexpected. The elevated refcount indicates
-- * being in the *middle* of a BPF program, which should
-- * be impossible. They are executed inside
-- * rcu_read_lock() where we can not sleep and where
-- * preemption is disabled.
-+ * Unsafe BPF code is no longer running, disable mitigations.
-+ * This must be done after bpf_prog_ran because the mitigation
-+ * code looks at its state.
- */
-- WARN_ON_ONCE(1);
-+ cpu_leave_reduced_memory_speculation();
- }
-
- DEFINE_PER_CPU(struct delayed_work, bpf_prog_delayed_work);
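The nospec.h hunk above follows the usual opt-in pattern: generic code sees empty inline fallbacks unless the architecture selects the feature and supplies real hooks (which patch 05 below wires up for x86). A compressed single-file sketch of that pattern; the ARCH_HAS_REDUCED_MEMORY_SPECULATION define here only mimics the Kconfig symbol and is not kernel code:

    #include <stdio.h>

    /* Build with -DARCH_HAS_REDUCED_MEMORY_SPECULATION to model an arch
     * that selects the feature; without it the hooks compile to nothing. */
    #ifndef ARCH_HAS_REDUCED_MEMORY_SPECULATION
    static inline void cpu_enter_reduced_memory_speculation(void) { }
    static inline void cpu_leave_reduced_memory_speculation(void) { }
    #else
    static inline void cpu_enter_reduced_memory_speculation(void)
    {
        puts("arch: tighten memory speculation");
    }
    static inline void cpu_leave_reduced_memory_speculation(void)
    {
        puts("arch: relax memory speculation");
    }
    #endif

    int main(void)
    {
        cpu_enter_reduced_memory_speculation();  /* first BPF entry on this CPU */
        cpu_leave_reduced_memory_speculation();  /* deferred work after the last exit */
        return 0;
    }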
diff --git a/patches.suse/04-bpf-x86-bugs-centralize-SPEC_CTRL-MSR-mask-generation.patch b/patches.suse/04-bpf-x86-bugs-centralize-SPEC_CTRL-MSR-mask-generation.patch
deleted file mode 100644
index 65b3a36484..0000000000
--- a/patches.suse/04-bpf-x86-bugs-centralize-SPEC_CTRL-MSR-mask-generation.patch
+++ /dev/null
@@ -1,102 +0,0 @@
-From: Dave Hansen <dave.hansen@linux.intel.com>
-Subject: x86, bugs: centralize SPEC_CTRL MSR mask generation
-Patch-mainline: Not yet, work in progress
-References: bsc#1087082 CVE-2018-3639
-
-The KVM code manipulates the SPEC_CTRL MSR when it enters and exits
-the guest. It overwrites the "kernel" value when it enters the guest
-and restores the "kernel" value after leaving the guest.
-
-Both code paths take into account the "base" (x86_spec_ctrl_base)
-value and the per-task TIF_RDS flag (on Intel). They then see if the
-new state differs from the existing state and avoid the MSR write if
-no change is made.
-
-But, these two paths could be a bit more unified. Introduce a new
-function: x86_calculate_kernel_spec_ctrl() which will figure out the
-"kernel" value to contrast it with the "guest" value. We also
-rename the arguments to the set/restore functions to make it clear
-that while the arguments are both "guest" state, they really mean
-different things to the two functions.
-
-This will make the next step easier when we have more state to
-consult in doing the x86_calculate_kernel_spec_ctrl() calculation.
-
-Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Signed-off-by: Jiri Kosina <jkosina@suse.cz>
----
- arch/x86/kernel/cpu/bugs.c | 50 +++++++++++++++++++++++++++++----------------
- 1 file changed, 33 insertions(+), 17 deletions(-)
-
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -150,33 +150,49 @@ u64 x86_spec_ctrl_get_default(void)
- }
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
-
--void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
-+static inline u64 intel_rds_mask(void)
- {
-- u64 host = x86_spec_ctrl_base;
-+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
-+ return 0;
-+
-+ return rds_tif_to_spec_ctrl(current_thread_info()->flags);
-+}
-+
-+/*
-+ * Calculate the SPEC_CTRL MSR value that the kernel
-+ * should be using under normal operation.
-+ */
-+static u64 x86_calculate_kernel_spec_ctrl(void)
-+{
-+ u64 spec_ctrl;
-
- if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL))
-- return;
-+ return 0;
-
-- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
-+ spec_ctrl = x86_spec_ctrl_base;
-+ spec_ctrl |= intel_rds_mask();
-
-- if (host != guest_spec_ctrl)
-- wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
-+ return spec_ctrl;
- }
--EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
-
--void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
-+/* We are entering a guest and need to set its MSR value. */
-+void x86_spec_ctrl_set_guest(u64 new_spec_ctrl)
- {
-- u64 host = x86_spec_ctrl_base;
--
-- if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL))
-- return;
-+ if (x86_calculate_kernel_spec_ctrl() != new_spec_ctrl)
-+ wrmsrl(MSR_IA32_SPEC_CTRL, new_spec_ctrl);
-+}
-+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
-
-- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
-+/*
-+ * We are leaving a guest and need to restore the kernel's MSR
-+ * value that it uses for normal operation.
-+ */
-+void x86_spec_ctrl_restore_host(u64 current_spec_ctrl)
-+{
-+ u64 new_spec_ctrl = x86_calculate_kernel_spec_ctrl();
-
-- if (host != guest_spec_ctrl)
-- wrmsrl(MSR_IA32_SPEC_CTRL, host);
-+ if (new_spec_ctrl != current_spec_ctrl)
-+ wrmsrl(MSR_IA32_SPEC_CTRL, new_spec_ctrl);
- }
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
-
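After the restructuring above, guest entry and exit share one shape: compute the kernel's desired SPEC_CTRL value in a single helper, compare it against the other value, and write the MSR only when they differ. A user-space model of that shape, with wrmsrl_stub() as a hypothetical stand-in for wrmsrl(MSR_IA32_SPEC_CTRL, ...) and SPEC_CTRL_RDS assumed to be bit 2:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define SPEC_CTRL_RDS (1ULL << 2)         /* assumed bit position, for illustration */

    static uint64_t spec_ctrl_base;           /* models x86_spec_ctrl_base */
    static bool cpu_is_intel = true;
    static bool task_wants_rds = true;        /* models the per-task TIF flag */

    static void wrmsrl_stub(uint64_t val)     /* stand-in for the real MSR write */
    {
        printf("MSR write: 0x%llx\n", (unsigned long long)val);
    }

    static uint64_t intel_rds_mask(void)
    {
        if (!cpu_is_intel)
            return 0;
        return task_wants_rds ? SPEC_CTRL_RDS : 0;
    }

    /* The one place that derives the kernel's value, as in the patch. */
    static uint64_t calculate_kernel_spec_ctrl(void)
    {
        return spec_ctrl_base | intel_rds_mask();
    }

    static void set_guest(uint64_t guest_val)       /* entering the guest */
    {
        if (calculate_kernel_spec_ctrl() != guest_val)
            wrmsrl_stub(guest_val);
    }

    static void restore_host(uint64_t guest_val)    /* leaving the guest */
    {
        uint64_t kernel_val = calculate_kernel_spec_ctrl();

        if (kernel_val != guest_val)
            wrmsrl_stub(kernel_val);
    }

    int main(void)
    {
        set_guest(0);        /* guest runs with SPEC_CTRL = 0 */
        restore_host(0);     /* back to base | RDS for the kernel */
        return 0;
    }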
diff --git a/patches.suse/05-bpf-x86-implement-reduced-speculation-when-running-BPF.patch b/patches.suse/05-bpf-x86-implement-reduced-speculation-when-running-BPF.patch
deleted file mode 100644
index ec8ce13b26..0000000000
--- a/patches.suse/05-bpf-x86-implement-reduced-speculation-when-running-BPF.patch
+++ /dev/null
@@ -1,184 +0,0 @@
-From: Dave Hansen <dave.hansen@linux.intel.com>
-Subject: x86: implement reduced speculation when running BPF
-Patch-mainline: Not yet, work in progress
-References: bsc#1087082 CVE-2018-3639
-
-Enable the SPEC_CTRL_RDS feature when running BPF code.
-
-Underneath x86_calculate_kernel_spec_ctrl(), we now check the
-per-cpu bpf_prog_ran counter. If the counter is elevated, we
-need to set the SPEC_CTRL_RDS bit.
-
-We also add MSR writes (via x86_sync_spec_ctrl()) to:
-
- cpu_enter_reduced_memory_speculation() and
- cpu_leave_reduced_memory_speculation()
-
-I'm not super happy that x86_sync_spec_ctrl() does an
-unconditional MSR write. But, they should be infrequent since
-they only happen twice per timeout period.
-
-Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Signed-off-by: Jiri Kosina <jkosina@suse.cz>
----
- arch/x86/Kconfig | 4 ++++
- arch/x86/include/asm/rmspec.h | 24 ++++++++++++++++++++++++
- arch/x86/include/asm/spec_ctrl.h | 3 +++
- arch/x86/kernel/cpu/bugs.c | 37 ++++++++++++++++++++++++++++++++++++-
- include/linux/filter.h | 4 +---
- include/linux/nospec.h | 2 ++
- 6 files changed, 70 insertions(+), 4 deletions(-)
- create mode 100644 arch/x86/include/asm/rmspec.h
-
---- /dev/null
-+++ b/arch/x86/include/asm/rmspec.h
-@@ -0,0 +1,24 @@
-+#ifndef _LINUX_RMSPEC_H
-+#define _LINUX_RMSPEC_H
-+#include <asm/msr.h>
-+#include <asm/spec_ctrl.h>
-+
-+/*
-+ * We call these when we *know* the CPU can go in/out of its
-+ * "safer" reduced memory speculation mode.
-+ *
-+ * For BPF, x86_sync_spec_ctrl() reads the per-cpu BPF state
-+ * variable and figures out the MSR value by itself. Thus,
-+ * we do not need to pass the "direction".
-+ */
-+static inline void cpu_enter_reduced_memory_speculation(void)
-+{
-+ x86_sync_spec_ctrl();
-+}
-+
-+static inline void cpu_leave_reduced_memory_speculation(void)
-+{
-+ x86_sync_spec_ctrl();
-+}
-+
-+#endif /* _LINUX_RMSPEC_H */
---- a/arch/x86/include/asm/spec_ctrl.h
-+++ b/arch/x86/include/asm/spec_ctrl.h
-@@ -113,6 +113,9 @@ static inline void x86_ibp_barrier(void)
- extern void x86_spec_ctrl_set_guest(u64);
- extern void x86_spec_ctrl_restore_host(u64);
-
-+/* Write a new SPEC_CTRL MSR based on current kernel state: */
-+extern void x86_sync_spec_ctrl(void);
-+
- /* AMD specific Speculative Store Bypass MSR data */
- extern u64 x86_amd_ls_cfg_base;
- extern u64 x86_amd_ls_cfg_rds_mask;
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -28,6 +28,7 @@ config X86
- select ARCH_HAS_FAST_MULTIPLIER
- select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_PMEM_API if X86_64
-+ select ARCH_HAS_REDUCED_MEMORY_SPECULATION
- select ARCH_HAS_MMIO_FLUSH
- select ARCH_HAS_SG_CHAIN
- select ARCH_HAVE_NMI_SAFE_CMPXCHG
-@@ -224,6 +225,9 @@ config RWSEM_XCHGADD_ALGORITHM
- config GENERIC_CALIBRATE_DELAY
- def_bool y
-
-+config ARCH_HAS_REDUCED_MEMORY_SPECULATION
-+ def_bool y
-+
- config ARCH_HAS_CPU_RELAX
- def_bool y
-
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -10,6 +10,7 @@
- #include <linux/init.h>
- #include <linux/utsname.h>
- #include <linux/cpu.h>
-+#include <linux/filter.h>
- #include <linux/module.h>
- #include <linux/nospec.h>
- #include <linux/prctl.h>
-@@ -152,10 +153,23 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_defa
-
- static inline u64 intel_rds_mask(void)
- {
-+ u64 mask;
-+
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- return 0;
-
-- return rds_tif_to_spec_ctrl(current_thread_info()->flags);
-+ mask = rds_tif_to_spec_ctrl(current_thread_info()->flags);
-+
-+ /*
-+ * BPF programs can be exploited to attack the kernel.
-+ * Leave the RDS bit on when we recently ran one. This
- * bit gets cleared after a BPF program has not run on
-+ * the CPU for a while.
-+ */
-+ if (get_cpu_var(bpf_prog_ran))
-+ mask |= SPEC_CTRL_RDS;
-+
-+ return mask;
- }
-
- /*
-@@ -196,6 +210,27 @@ void x86_restore_host_spec_ctrl(u64 curr
- }
- EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
-
-+/*
-+ * A condition that may affect the SPEC_CTRL MSR has changed.
-+ * Recalculate a new value for this CPU and set it.
-+ *
-+ * It is not easy to optimize the wrmsrl() away unless the
-+ * callers have a full understanding of all the conditions
-+ * that affect the output of x86_calculate_kernel_spec_ctrl().
-+ *
-+ * Try not to call this too often.
-+ */
-+void x86_sync_spec_ctrl(void)
-+{
-+ u64 new_spec_ctrl = x86_calculate_kernel_spec_ctrl();
-+
-+ if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL))
-+ return;
-+
-+ wrmsrl(MSR_IA32_SPEC_CTRL, new_spec_ctrl);
-+}
-+EXPORT_SYMBOL_GPL(x86_sync_spec_ctrl);
-+
- static void x86_amd_rds_enable(void)
- {
- u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -353,13 +353,11 @@ DECLARE_PER_CPU(unsigned int, bpf_prog_r
-
- static inline void bpf_enter_prog(const struct bpf_prog *fp)
- {
-- int *count = &get_cpu_var(bpf_prog_ran);
-- (*count)++;
- /*
- * Upon the first entry to BPF code, we need to reduce
- * memory speculation to mitigate attacks targeting it.
- */
-- if (*count == 1)
-+ if (this_cpu_inc_return(bpf_prog_ran) == 1)
- cpu_enter_reduced_memory_speculation();
- }
-
---- a/include/linux/nospec.h
-+++ b/include/linux/nospec.h
-@@ -72,6 +72,8 @@ static inline void cpu_enter_reduced_mem
- static inline void cpu_leave_reduced_memory_speculation(void)
- {
- }
-+#else
-+#include <asm/rmspec.h>
- #endif
-
- #endif /* _LINUX_NOSPEC_H */
diff --git a/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch b/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
index bed4e1d0fa..7f4692f962 100644
--- a/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
+++ b/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
@@ -78,7 +78,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
#define MSR_IA32_BBL_CR_CTL 0x00000119
--- a/arch/x86/include/asm/spec_ctrl.h
+++ b/arch/x86/include/asm/spec_ctrl.h
-@@ -118,20 +118,20 @@ extern void x86_sync_spec_ctrl(void);
+@@ -115,20 +115,20 @@ extern void x86_spec_ctrl_restore_host(u
/* AMD specific Speculative Store Bypass MSR data */
extern u64 x86_amd_ls_cfg_base;
@@ -168,7 +168,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -46,10 +46,10 @@ static u64 __ro_after_init x86_spec_ctrl
+@@ -45,10 +45,10 @@ static u64 __ro_after_init x86_spec_ctrl
/*
* AMD specific MSR info for Speculative Store Bypass control.
@@ -181,7 +181,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
void __init check_bugs(void)
{
-@@ -146,7 +146,7 @@ u64 x86_spec_ctrl_get_default(void)
+@@ -145,7 +145,7 @@ u64 x86_spec_ctrl_get_default(void)
u64 msrval = x86_spec_ctrl_base;
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
@@ -190,27 +190,26 @@ Acked-by: Borislav Petkov <bp@suse.de>
return msrval;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
-@@ -158,7 +158,7 @@ static inline u64 intel_rds_mask(void)
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- return 0;
+@@ -158,7 +158,7 @@ void x86_spec_ctrl_set_guest(u64 guest_s
+ return;
-- mask = rds_tif_to_spec_ctrl(current_thread_info()->flags);
-+ mask = ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
- /*
- * BPF programs can be exploited to attack the kernel.
-@@ -167,7 +167,7 @@ static inline u64 intel_rds_mask(void)
- * the CPU for a while.
- */
- if (get_cpu_var(bpf_prog_ran))
-- mask |= SPEC_CTRL_RDS;
-+ mask |= SPEC_CTRL_SSBD;
+ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+@@ -173,18 +173,18 @@ void x86_spec_ctrl_restore_host(u64 gues
+ return;
- return mask;
- }
-@@ -231,11 +231,11 @@ void x86_sync_spec_ctrl(void)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
++ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, host);
}
- EXPORT_SYMBOL_GPL(x86_sync_spec_ctrl);
+ EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
-static void x86_amd_rds_enable(void)
+static void x86_amd_ssb_disable(void)
@@ -223,7 +222,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
-@@ -515,7 +515,7 @@ static enum ssb_mitigation_cmd __init __
+@@ -474,7 +474,7 @@ static enum ssb_mitigation_cmd __init __
enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
enum ssb_mitigation_cmd cmd;
@@ -232,7 +231,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
return mode;
cmd = ssb_parse_cmdline();
-@@ -549,7 +549,7 @@ static enum ssb_mitigation_cmd __init __
+@@ -508,7 +508,7 @@ static enum ssb_mitigation_cmd __init __
/*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
@@ -241,7 +240,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
*/
if (mode == SPEC_STORE_BYPASS_DISABLE) {
-@@ -560,12 +560,12 @@ static enum ssb_mitigation_cmd __init __
+@@ -519,12 +519,12 @@ static enum ssb_mitigation_cmd __init __
*/
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
@@ -258,7 +257,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
break;
}
}
-@@ -598,16 +598,16 @@ static int ssb_prctl_set(struct task_str
+@@ -557,16 +557,16 @@ static int ssb_prctl_set(struct task_str
if (task_spec_ssb_force_disable(task))
return -EPERM;
task_clear_spec_ssb_disable(task);
@@ -278,7 +277,7 @@ Acked-by: Borislav Petkov <bp@suse.de>
break;
default:
return -ERANGE;
-@@ -677,7 +677,7 @@ void x86_spec_ctrl_setup_ap(void)
+@@ -636,7 +636,7 @@ void x86_spec_ctrl_setup_ap(void)
x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
diff --git a/patches.suse/bpf-prevent-memory-disambiguation-attack.patch b/patches.suse/bpf-prevent-memory-disambiguation-attack.patch
new file mode 100644
index 0000000000..d89ef12009
--- /dev/null
+++ b/patches.suse/bpf-prevent-memory-disambiguation-attack.patch
@@ -0,0 +1,208 @@
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Tue, 15 May 2018 09:27:05 -0700
+Subject: [PATCH] bpf: prevent memory disambiguation attack
+References: bsc#1087082 CVE-2018-3639
+Patch-mainline: v4.17-rc7
+Git-commit: af86ca4e3088fe5eacf2f7e58c01fa68ca067672
+
+Detect code patterns where malicious 'speculative store bypass' can be used
+and sanitize such patterns.
+
+ 39: (bf) r3 = r10
+ 40: (07) r3 += -216
+ 41: (79) r8 = *(u64 *)(r7 +0) // slow read
+ 42: (7a) *(u64 *)(r10 -72) = 0 // verifier inserts this instruction
+ 43: (7b) *(u64 *)(r8 +0) = r3 // this store becomes slow due to r8
+ 44: (79) r1 = *(u64 *)(r6 +0) // cpu speculatively executes this load
+ 45: (71) r2 = *(u8 *)(r1 +0) // speculatively arbitrary 'load byte'
+ // is now sanitized
+
+Above code after x86 JIT becomes:
+ e5: mov %rbp,%rdx
+ e8: add $0xffffffffffffff28,%rdx
+ ef: mov 0x0(%r13),%r14
+ f3: movq $0x0,-0x48(%rbp)
+ fb: mov %rdx,0x0(%r14)
+ ff: mov 0x0(%rbx),%rdi
+103: movzbq 0x0(%rdi),%rsi
+
+[jkosina@suse.cz: adjustments needed for 4.4 port (insn_idx passing mostly)]
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ kernel/bpf/verifier.c | 79 ++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 67 insertions(+), 12 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -192,6 +192,7 @@ struct bpf_insn_aux_data {
+ struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
+ };
+ bool seen; /* this insn was processed by the verifier */
++ int sanitize_stack_off; /* stack slot to be cleared */
+ };
+
+ #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+@@ -569,10 +570,12 @@ static bool is_spillable_regtype(enum bp
+ /* check_stack_read/write functions track spill/fill of registers,
+ * stack boundary and alignment are checked in check_mem_access()
+ */
+-static int check_stack_write(struct verifier_state *state, int off, int size,
+- int value_regno)
++static int check_stack_write(struct verifier_env *env,
++ struct verifier_state *state, int off, int size,
++ int value_regno, int insn_idx)
+ {
+ int i;
++ int slot = -off - 1, spi = slot / BPF_REG_SIZE;
+ /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
+ * so it's aligned access and [off, off + size) are within stack limits
+ */
+@@ -590,8 +593,32 @@ static int check_stack_write(struct veri
+ state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
+ state->regs[value_regno];
+
+- for (i = 0; i < BPF_REG_SIZE; i++)
++ for (i = 0; i < BPF_REG_SIZE; i++) {
++ if (state->stack_slot_type[MAX_BPF_STACK + off + i] == STACK_MISC &&
++ !env->allow_ptr_leaks) {
++ int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
++ int soff = (-spi - 1) * BPF_REG_SIZE;
++
++ /* detected reuse of integer stack slot with a pointer
++ * which means either llvm is reusing stack slot or
++ * an attacker is trying to exploit CVE-2018-3639
++ * (speculative store bypass)
++ * Have to sanitize that slot with preemptive
++ * store of zero.
++ */
++ if (*poff && *poff != soff) {
++ /* disallow programs where single insn stores
++ * into two different stack slots, since verifier
++ * cannot sanitize them
++ */
++ verbose("insn %d cannot access two stack slots fp%d and fp%d",
++ insn_idx, *poff, soff);
++ return -EINVAL;
++ }
++ *poff = soff;
++ }
+ state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
++ }
+ } else {
+ /* regular write of data into stack */
+ state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
+@@ -696,7 +723,7 @@ static bool is_ctx_reg(struct verifier_e
+ * if t==write && value_regno==-1, some unknown value is stored into memory
+ * if t==read && value_regno==-1, don't care what we read from memory
+ */
+-static int check_mem_access(struct verifier_env *env, u32 regno, int off,
++static int check_mem_access(struct verifier_env *env, int insn_idx, u32 regno, int off,
+ int bpf_size, enum bpf_access_type t,
+ int value_regno)
+ {
+@@ -748,7 +775,7 @@ static int check_mem_access(struct verif
+ verbose("attempt to corrupt spilled pointer on stack\n");
+ return -EACCES;
+ }
+- err = check_stack_write(state, off, size, value_regno);
++ err = check_stack_write(env, state, off, size, value_regno, insn_idx);
+ } else {
+ err = check_stack_read(state, off, size, value_regno);
+ }
+@@ -760,7 +787,7 @@ static int check_mem_access(struct verif
+ return err;
+ }
+
+-static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
++static int check_xadd(struct verifier_env *env, int insn_idx, struct bpf_insn *insn)
+ {
+ struct reg_state *regs = env->cur_state.regs;
+ int err;
+@@ -793,13 +820,13 @@ static int check_xadd(struct verifier_en
+ }
+
+ /* check whether atomic_add can read the memory */
+- err = check_mem_access(env, insn->dst_reg, insn->off,
++ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+ BPF_SIZE(insn->code), BPF_READ, -1);
+ if (err)
+ return err;
+
+ /* check whether atomic_add can write into the same memory */
+- return check_mem_access(env, insn->dst_reg, insn->off,
++ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+ BPF_SIZE(insn->code), BPF_WRITE, -1);
+ }
+
+@@ -1838,7 +1865,7 @@ static int do_check(struct verifier_env
+ /* check that memory (src_reg + off) is readable,
+ * the state of dst_reg will be updated by this func
+ */
+- err = check_mem_access(env, insn->src_reg, insn->off,
++ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
+ BPF_SIZE(insn->code), BPF_READ,
+ insn->dst_reg);
+ if (err)
+@@ -1876,7 +1903,7 @@ static int do_check(struct verifier_env
+ enum bpf_reg_type *prev_dst_type, dst_reg_type;
+
+ if (BPF_MODE(insn->code) == BPF_XADD) {
+- err = check_xadd(env, insn);
++ err = check_xadd(env, insn_idx, insn);
+ if (err)
+ return err;
+ insn_idx++;
+@@ -1895,7 +1922,7 @@ static int do_check(struct verifier_env
+ dst_reg_type = regs[insn->dst_reg].type;
+
+ /* check that memory (dst_reg + off) is writeable */
+- err = check_mem_access(env, insn->dst_reg, insn->off,
++ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+ BPF_SIZE(insn->code), BPF_WRITE,
+ insn->src_reg);
+ if (err)
+@@ -1930,7 +1957,7 @@ static int do_check(struct verifier_env
+ }
+
+ /* check that memory (dst_reg + off) is writeable */
+- err = check_mem_access(env, insn->dst_reg, insn->off,
++ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+ BPF_SIZE(insn->code), BPF_WRITE,
+ -1);
+ if (err)
+@@ -2227,6 +2254,34 @@ static int convert_ctx_accesses(struct v
+ else
+ continue;
+
++ if (type == BPF_WRITE &&
++ env->insn_aux_data[i + delta].sanitize_stack_off) {
++ struct bpf_insn patch[] = {
++ /* Sanitize suspicious stack slot with zero.
++ * There are no memory dependencies for this store,
++ * since it's only using frame pointer and immediate
++ * constant of zero
++ */
++ BPF_ST_MEM(BPF_DW, BPF_REG_FP,
++ env->insn_aux_data[i + delta].sanitize_stack_off,
++ 0),
++ /* the original STX instruction will immediately
++ * overwrite the same stack slot with appropriate value
++ */
++ *insn,
++ };
++
++ cnt = ARRAY_SIZE(patch);
++ new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
++ if (!new_prog)
++ return -ENOMEM;
++
++ delta += cnt - 1;
++ env->prog = new_prog;
++ insn = new_prog->insnsi + i + delta;
++ continue;
++ }
++
+ if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
+ continue;
+
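The verifier change above reduces to one decision per stack write: if an unprivileged program spills a pointer into a slot that previously held ordinary scalar data, remember the slot offset so a zeroing store can be patched in ahead of the spill. A small model of just that decision, using hypothetical names (slot_type, needs_sanitize) rather than the real verifier structures:

    #include <stdio.h>
    #include <stdbool.h>

    enum slot_type { SLOT_INVALID, SLOT_MISC, SLOT_SPILL };   /* STACK_* analogue */

    /* True when a preemptive zero store must be inserted before the spill,
     * mirroring the CVE-2018-3639 sanitization in check_stack_write(). */
    static bool needs_sanitize(enum slot_type old_type, bool spilling_pointer,
                               bool allow_ptr_leaks)
    {
        return spilling_pointer && old_type == SLOT_MISC && !allow_ptr_leaks;
    }

    int main(void)
    {
        /* Slot reused: held scalar data, now gets a pointer spill -> sanitize. */
        printf("reuse, unpriv: %d\n", needs_sanitize(SLOT_MISC, true, false));
        /* Fresh slot or privileged loader -> leave the program alone. */
        printf("fresh, unpriv: %d\n", needs_sanitize(SLOT_INVALID, true, false));
        printf("reuse, priv:   %d\n", needs_sanitize(SLOT_MISC, true, true));
        return 0;
    }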
diff --git a/series.conf b/series.conf
index a33ef9939f..5aef8cd7e3 100644
--- a/series.conf
+++ b/series.conf
@@ -23101,20 +23101,14 @@
patches.suse/24-seccomp-move-speculation-migitation-control-to-arch-code.patch
patches.suse/25-x86-speculation-make-seccomp-the-default-mode-for-speculative-store-bypass.patch
- # BPF SSB
- patches.suse/01-bpf-add-enter_exit-markers.patch
- patches.suse/02-bpf-track-entry-to-and-exit-from-BFP-code.patch
- patches.suse/03-bpf-use-reduced-speculation-mitigations.patch
- patches.suse/04-bpf-x86-bugs-centralize-SPEC_CTRL-MSR-mask-generation.patch
- patches.suse/05-bpf-x86-implement-reduced-speculation-when-running-BPF.patch
-
patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
patches.suse/27-proc-use-underscores-for-ssbd-in-status.patch
patches.suse/28-ssbd-remove-from-allwork-mask.patch
patches.suse/29-kvm-svm-move-spec-control-call-after-restore-of-gs.patch
- patches.suse/0004-x86-bugs-centralize-SPEC_CTRL-MSR-mask-generation-FIX-SLE12.patch
+ # BPF SSB
+ patches.suse/bpf-prevent-memory-disambiguation-attack.patch
########################################################
# You'd better have a good reason for adding a patch