author	Borislav Petkov <bp@suse.de>	2018-01-12 21:55:29 +0100
committer	Borislav Petkov <bp@suse.de>	2018-01-12 21:55:37 +0100
commit	51b0edb1a36bf197e31923492d18e957c2bf4cf3 (patch)
tree	272618265c7c0c1d53737089967fd947c032787d
parent	f297e38106f592101e6a4f9a9f3c300b3c7f1e1b (diff)
x86/espfix/64: Stop assuming that pt_regs is on the entry stack
(bsc#1068032 CVE-2017-5754).
-rw-r--r--	patches.arch/14-x86-espfix-64-stop-assuming-that-pt_regs-is-on-the-entry-stack.patch	111
-rw-r--r--	series.conf	1
2 files changed, 112 insertions, 0 deletions
diff --git a/patches.arch/14-x86-espfix-64-stop-assuming-that-pt_regs-is-on-the-entry-stack.patch b/patches.arch/14-x86-espfix-64-stop-assuming-that-pt_regs-is-on-the-entry-stack.patch
new file mode 100644
index 0000000000..293cf6a505
--- /dev/null
+++ b/patches.arch/14-x86-espfix-64-stop-assuming-that-pt_regs-is-on-the-entry-stack.patch
@@ -0,0 +1,111 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Mon, 4 Dec 2017 15:07:22 +0100
+Subject: x86/espfix/64: Stop assuming that pt_regs is on the entry stack
+Git-commit: 6d9256f0a89eaff97fca6006100bcaea8d1d8bdb
+Patch-mainline: v4.15-rc5
+References: bsc#1068032 CVE-2017-5754
+
+When we start using an entry trampoline, a #GP from userspace will
+be delivered on the entry stack, not on the task stack. Fix the
+espfix64 #DF fixup to set up #GP according to TSS.SP0, rather than
+assuming that pt_regs + 1 == SP0. This won't change anything
+without an entry stack, but it will make the code continue to work
+when an entry stack is added.
+
+While we're at it, improve the comments to explain what's actually
+going on.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bpetkov@suse.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Laight <David.Laight@aculab.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Eduardo Valentin <eduval@amazon.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: aliguori@amazon.com
+Cc: daniel.gruss@iaik.tugraz.at
+Cc: hughd@google.com
+Cc: keescook@google.com
+Link: https://lkml.kernel.org/r/20171204150606.130778051@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/traps.c | 37 ++++++++++++++++++++++++++++---------
+ 1 file changed, 28 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -356,9 +356,15 @@ dotraplinkage void do_double_fault(struc
+
+ /*
+ * If IRET takes a non-IST fault on the espfix64 stack, then we
+- * end up promoting it to a doublefault. In that case, modify
+- * the stack to make it look like we just entered the #GP
+- * handler from user space, similar to bad_iret.
++ * end up promoting it to a doublefault. In that case, take
++ * advantage of the fact that we're not using the normal (TSS.sp0)
++ * stack right now. We can write a fake #GP(0) frame at TSS.sp0
++ * and then modify our own IRET frame so that, when we return,
++ * we land directly at the #GP(0) vector with the stack already
++ * set up according to its expectations.
++ *
++ * The net result is that our #GP handler will think that we
++ * entered from usermode with the bad user context.
+ *
+ * No need for ist_enter here because we don't use RCU.
+ */
+@@ -366,13 +372,26 @@ dotraplinkage void do_double_fault(struc
+ regs->cs == __KERNEL_CS &&
+ regs->ip == (unsigned long)native_irq_return_iret)
+ {
+- struct pt_regs *normal_regs = task_pt_regs(current);
++ struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
+
+- /* Fake a #GP(0) from userspace. */
+- memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
+- normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
++ /*
++ * regs->sp points to the failing IRET frame on the
++ * ESPFIX64 stack. Copy it to the entry stack. This fills
++ * in gpregs->ss through gpregs->ip.
++ *
++ */
++ memmove(&gpregs->ip, (void *)regs->sp, 5*8);
++ gpregs->orig_ax = 0; /* Missing (lost) #GP error code */
++
++ /*
++ * Adjust our frame so that we return straight to the #GP
++ * vector with the expected RSP value. This is safe because
++ * we won't enable interrupts or schedule before we invoke
++ * general_protection, so nothing will clobber the stack
++ * frame we just set up.
++ */
+ regs->ip = (unsigned long)general_protection;
+- regs->sp = (unsigned long)&normal_regs->orig_ax;
++ regs->sp = (unsigned long)&gpregs->orig_ax;
+
+ return;
+ }
+@@ -397,7 +416,7 @@ dotraplinkage void do_double_fault(struc
+ *
+ * Processors update CR2 whenever a page fault is detected. If a
+ * second page fault occurs while an earlier page fault is being
+- * deliv- ered, the faulting linear address of the second fault will
++ * delivered, the faulting linear address of the second fault will
+ * overwrite the contents of CR2 (replacing the previous
+ * address). These updates to CR2 occur even if the page fault
+ * results in a double fault or occurs during the delivery of a
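
The key trick in the patch above is the pointer arithmetic around TSS.sp0:
"(struct pt_regs *)sp0 - 1" places a pt_regs so that it ends exactly at sp0,
and the 5*8-byte memmove fills precisely its last five fields, the
hardware-pushed IRET frame (ip, cs, flags, sp, ss). Below is a minimal
user-space sketch of that layout, for illustration only: struct fake_pt_regs
is a hypothetical, trimmed-down stand-in for the kernel's struct pt_regs
(the real struct carries the general-purpose registers ahead of orig_ax),
and entry_stack stands in for the stack that TSS.sp0 points past.

/*
 * Illustrative sketch, not kernel code: models the frame arithmetic
 * used by the do_double_fault() fixup above.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fake_pt_regs {                   /* hypothetical, trimmed layout */
	uint64_t orig_ax;               /* error-code slot              */
	uint64_t ip, cs, flags, sp, ss; /* hardware IRET frame, last in the struct */
};

int main(void)
{
	static uint64_t entry_stack[512];

	/* TSS.sp0 points just past the top of the entry stack... */
	uint8_t *sp0 = (uint8_t *)(entry_stack + 512);

	/* ...so "(struct pt_regs *)sp0 - 1" is a frame ending exactly at sp0. */
	struct fake_pt_regs *gpregs = (struct fake_pt_regs *)sp0 - 1;

	/* A failing IRET frame as it would sit on the espfix64 stack. */
	uint64_t iret_frame[5] = { 0xdead, 0x10, 0x2, 0x7fff0000, 0x2b };

	/* The 5*8-byte copy fills exactly gpregs->ip through gpregs->ss. */
	memmove(&gpregs->ip, iret_frame, 5 * 8);
	gpregs->orig_ax = 0; /* the lost #GP error code */

	/*
	 * The kernel then aims its own regs->sp at &gpregs->orig_ax and
	 * regs->ip at general_protection, so returning from #DF lands in
	 * the #GP handler with this fake frame already in place.
	 */
	printf("frame at %p ends at sp0 %p (ip=%#llx)\n",
	       (void *)gpregs, (void *)sp0,
	       (unsigned long long)gpregs->ip);
	return 0;
}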
diff --git a/series.conf b/series.conf
index 8131888e66..0b45d96523 100644
--- a/series.conf
+++ b/series.conf
@@ -7385,6 +7385,7 @@
patches.arch/11-x86-entry-move-sysenter_stack-to-the-beginning-of-struct-tss_struct.patch
patches.arch/12-x86-entry-remap-the-tss-into-the-cpu-entry-area.patch
patches.arch/13-x86-entry-64-separate-cpu_current_top_of_stack-from-tss-sp0.patch
+ patches.arch/14-x86-espfix-64-stop-assuming-that-pt_regs-is-on-the-entry-stack.patch
########################################################
# Staging tree patches