Home > GIT Browse
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorBorislav Petkov <bp@suse.de>2018-01-12 21:55:29 +0100
committerBorislav Petkov <bp@suse.de>2018-01-12 21:55:36 +0100
commitf297e38106f592101e6a4f9a9f3c300b3c7f1e1b (patch)
tree54a8f7bb535bf437425a5b973b85932ba9f41390
parentc448b944a1f160e0ada0aa0ac5c1aa7d31b2a89f (diff)
x86/entry/64: Separate cpu_current_top_of_stack from TSS.sp0
(bsc#1068032 CVE-2017-5754).
-rw-r--r--patches.arch/13-x86-entry-64-separate-cpu_current_top_of_stack-from-tss-sp0.patch141
-rw-r--r--series.conf1
2 files changed, 142 insertions, 0 deletions
diff --git a/patches.arch/13-x86-entry-64-separate-cpu_current_top_of_stack-from-tss-sp0.patch b/patches.arch/13-x86-entry-64-separate-cpu_current_top_of_stack-from-tss-sp0.patch
new file mode 100644
index 0000000000..146889cd95
--- /dev/null
+++ b/patches.arch/13-x86-entry-64-separate-cpu_current_top_of_stack-from-tss-sp0.patch
@@ -0,0 +1,141 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Mon, 4 Dec 2017 15:07:21 +0100
+Subject: x86/entry/64: Separate cpu_current_top_of_stack from TSS.sp0
+Git-commit: 9aaefe7b59ae00605256a7d6bd1c1456432495fc
+Patch-mainline: v4.15-rc5
+References: bsc#1068032 CVE-2017-5754
+
+On 64-bit kernels, we used to assume that TSS.sp0 was the current
+top of stack. With the addition of an entry trampoline, this will
+no longer be the case. Store the current top of stack in TSS.sp1,
+which is otherwise unused but shares the same cacheline.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bpetkov@suse.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Laight <David.Laight@aculab.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Eduardo Valentin <eduval@amazon.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: aliguori@amazon.com
+Cc: daniel.gruss@iaik.tugraz.at
+Cc: hughd@google.com
+Cc: keescook@google.com
+Link: https://lkml.kernel.org/r/20171204150606.050864668@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/processor.h | 18 +++++++++++++-----
+ arch/x86/include/asm/thread_info.h | 2 +-
+ arch/x86/kernel/asm-offsets_64.c | 1 +
+ arch/x86/kernel/process.c | 10 ++++++++++
+ arch/x86/kernel/process_64.c | 1 +
+ 5 files changed, 26 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -308,7 +308,13 @@ struct x86_hw_tss {
+ struct x86_hw_tss {
+ u32 reserved1;
+ u64 sp0;
++
++ /*
++ * We store cpu_current_top_of_stack in sp1 so it's always accessible.
++ * Linux does not use ring 1, so sp1 is not otherwise needed.
++ */
+ u64 sp1;
++
+ u64 sp2;
+ u64 reserved2;
+ u64 ist[7];
+@@ -367,6 +373,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_
+
+ #ifdef CONFIG_X86_32
+ DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
++#else
++#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1
+ #endif
+
+ /*
+@@ -538,12 +546,12 @@ static inline void native_swapgs(void)
+
+ static inline unsigned long current_top_of_stack(void)
+ {
+-#ifdef CONFIG_X86_64
+- return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
+-#else
+- /* sp0 on x86_32 is special in and around vm86 mode. */
++ /*
++ * We can't read directly from tss.sp0: sp0 on x86_32 is special in
++ * and around vm86 mode and sp0 on x86_64 is special because of the
++ * entry trampoline.
++ */
+ return this_cpu_read_stable(cpu_current_top_of_stack);
+-#endif
+ }
+
+ static inline bool on_thread_stack(void)
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -203,7 +203,7 @@ static inline int arch_within_stack_fram
+ #else /* !__ASSEMBLY__ */
+
+ #ifdef CONFIG_X86_64
+-# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
++# define cpu_current_top_of_stack (cpu_tss + TSS_sp1)
+ #endif
+
+ #endif
+--- a/arch/x86/kernel/asm-offsets_64.c
++++ b/arch/x86/kernel/asm-offsets_64.c
+@@ -65,6 +65,7 @@ int main(void)
+
+ OFFSET(TSS_ist, tss_struct, x86_tss.ist);
+ OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
++ OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
+ BLANK();
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -461,6 +461,7 @@ __switch_to(struct task_struct *prev_p,
+ * Switch the PDA and FPU contexts.
+ */
+ this_cpu_write(current_task, next_p);
++ this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
+
+ /* Reload sp0. */
+ update_sp0(next_p);
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -55,6 +55,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(
+ * Poison it.
+ */
+ .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
++
++#ifdef CONFIG_X86_64
++ /*
++ * .sp1 is cpu_current_top_of_stack. The init task never
++ * runs user code, but cpu_current_top_of_stack should still
++ * be well defined before the first context switch.
++ */
++ .sp1 = TOP_OF_INIT_STACK,
++#endif
++
+ #ifdef CONFIG_X86_32
+ .ss0 = __KERNEL_DS,
+ .ss1 = __KERNEL_CS,
diff --git a/series.conf b/series.conf
index ab6762e1f0..8131888e66 100644
--- a/series.conf
+++ b/series.conf
@@ -7384,6 +7384,7 @@
patches.arch/10-x86-dumpstack-handle-stack-overflow-on-all-stacks.patch
patches.arch/11-x86-entry-move-sysenter_stack-to-the-beginning-of-struct-tss_struct.patch
patches.arch/12-x86-entry-remap-the-tss-into-the-cpu-entry-area.patch
+ patches.arch/13-x86-entry-64-separate-cpu_current_top_of_stack-from-tss-sp0.patch
########################################################
# Staging tree patches