author    Borislav Petkov <bp@suse.de>  2018-01-12 21:55:29 +0100
committer Borislav Petkov <bp@suse.de>  2018-01-12 21:55:37 +0100
commit    5b26955607c14607381b9ab7f0f933474ee79b4e (patch)
tree      9eb828cf6a625ed4590f084b4ea0edcd324ab212
parent    e415ea995607e647558ce6830ce71086f2c4ed4f (diff)
x86/xen/64: Rearrange the SYSCALL entries (bsc#1068032 CVE-2017-5754).
-rw-r--r-- patches.arch/17-x86-xen-64-rearrange-the-syscall-entries.patch | 136
-rw-r--r-- series.conf | 1
2 files changed, 137 insertions, 0 deletions
diff --git a/patches.arch/17-x86-xen-64-rearrange-the-syscall-entries.patch b/patches.arch/17-x86-xen-64-rearrange-the-syscall-entries.patch
new file mode 100644
index 0000000000..d3ad3520ca
--- /dev/null
+++ b/patches.arch/17-x86-xen-64-rearrange-the-syscall-entries.patch
@@ -0,0 +1,136 @@
+From: Andy Lutomirski <luto@kernel.org>
+Date: Mon, 7 Aug 2017 20:59:21 -0700
+Subject: x86/xen/64: Rearrange the SYSCALL entries
+Git-commit: 8a9949bc71a71b3dd633255ebe8f8869b1f73474
+Patch-mainline: v4.14-rc1
+References: bsc#1068032 CVE-2017-5754
+
+Xen's raw SYSCALL entries are much less weird than native. Rather
+than fudging them to look like native entries, use the Xen-provided
+stack frame directly.
+
+This lets us eliminate entry_SYSCALL_64_after_swapgs and two uses of
+the SWAPGS_UNSAFE_STACK paravirt hook. The SYSENTER code would
+benefit from similar treatment.
+
+This makes one change to the native code path: the compat
+instruction that clears the high 32 bits of %rax is moved slightly
+later. I'd be surprised if this affects performance at all.
+
+Tested-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Borislav Petkov <bpetkov@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: xen-devel@lists.xenproject.org
+Link: http://lkml.kernel.org/r/7c88ed36805d36841ab03ec3b48b4122c4418d71.1502164668.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/entry/entry_64.S | 9 ++-------
+ arch/x86/entry/entry_64_compat.S | 7 +++----
+ arch/x86/xen/xen-asm_64.S | 23 +++++++++--------------
+ 3 files changed, 14 insertions(+), 25 deletions(-)
+
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -183,21 +183,20 @@ ENDPROC(entry_SYSENTER_compat)
+ */
+ ENTRY(entry_SYSCALL_compat)
+ /* Interrupts are off on entry. */
+- SWAPGS_UNSAFE_STACK
++ swapgs
+
+ /* Stash user ESP and switch to the kernel stack. */
+ movl %esp, %r8d
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+- /* Zero-extending 32-bit regs, do not remove */
+- movl %eax, %eax
+-
+ /* Construct struct pt_regs on stack */
+ pushq $__USER32_DS /* pt_regs->ss */
+ pushq %r8 /* pt_regs->sp */
+ pushq %r11 /* pt_regs->flags */
+ pushq $__USER32_CS /* pt_regs->cs */
+ pushq %rcx /* pt_regs->ip */
++GLOBAL(entry_SYSCALL_compat_after_hwframe)
++ movl %eax, %eax /* discard orig_ax high bits */
+ pushq %rax /* pt_regs->orig_ax */
+ pushq %rdi /* pt_regs->di */
+ pushq %rsi /* pt_regs->si */
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -142,14 +142,8 @@ ENTRY(entry_SYSCALL_64)
+ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+ * it is too small to ever cause noticeable irq latency.
+ */
+- SWAPGS_UNSAFE_STACK
+- /*
+- * A hypervisor implementation might want to use a label
+- * after the swapgs, so that it can do the swapgs
+- * for the guest and jump here on syscall.
+- */
+-GLOBAL(entry_SYSCALL_64_after_swapgs)
+
++ swapgs
+ movq %rsp, PER_CPU_VAR(rsp_scratch)
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+@@ -161,6 +155,7 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+ pushq %r11 /* pt_regs->flags */
+ pushq $__USER_CS /* pt_regs->cs */
+ pushq %rcx /* pt_regs->ip */
++GLOBAL(entry_SYSCALL_64_after_hwframe)
+ pushq %rax /* pt_regs->orig_ax */
+ pushq %rdi /* pt_regs->di */
+ pushq %rsi /* pt_regs->si */
+--- a/arch/x86/xen/xen-asm_64.S
++++ b/arch/x86/xen/xen-asm_64.S
+@@ -113,34 +113,29 @@ RELOC(xen_sysret64, 1b+1)
+ * rip
+ * r11
+ * rsp->rcx
+- *
+- * In all the entrypoints, we undo all that to make it look like a
+- * CPU-generated syscall/sysenter and jump to the normal entrypoint.
+ */
+
+-.macro undo_xen_syscall
+- mov 0*8(%rsp), %rcx
+- mov 1*8(%rsp), %r11
+- mov 5*8(%rsp), %rsp
+-.endm
+-
+ /* Normal 64-bit system call target */
+ ENTRY(xen_syscall_target)
+- undo_xen_syscall
+- jmp entry_SYSCALL_64_after_swapgs
++ popq %rcx
++ popq %r11
++ jmp entry_SYSCALL_64_after_hwframe
+ ENDPROC(xen_syscall_target)
+
+ #ifdef CONFIG_IA32_EMULATION
+
+ /* 32-bit compat syscall target */
+ ENTRY(xen_syscall32_target)
+- undo_xen_syscall
+- jmp entry_SYSCALL_compat
++ popq %rcx
++ popq %r11
++ jmp entry_SYSCALL_compat_after_hwframe
+ ENDPROC(xen_syscall32_target)
+
+ /* 32-bit compat sysenter target */
+ ENTRY(xen_sysenter_target)
+- undo_xen_syscall
++ mov 0*8(%rsp), %rcx
++ mov 1*8(%rsp), %r11
++ mov 5*8(%rsp), %rsp
+ jmp entry_SYSENTER_compat
+ ENDPROC(xen_sysenter_target)
+
diff --git a/series.conf b/series.conf
index 8c0bfc5069..b31f12c5a7 100644
--- a/series.conf
+++ b/series.conf
@@ -7388,6 +7388,7 @@
patches.arch/14-x86-espfix-64-stop-assuming-that-pt_regs-is-on-the-entry-stack.patch
patches.arch/15-x86-entry-64-use-a-per-cpu-trampoline-stack-for-idt-entries.patch
patches.arch/16-x86-entry-64-return-to-userspace-from-the-trampoline-stack.patch
+ patches.arch/17-x86-xen-64-rearrange-the-syscall-entries.patch
########################################################
# Staging tree patches