author    Jiri Kosina <jkosina@suse.cz>  2017-06-22 16:37:55 +0200
committer Jiri Kosina <jkosina@suse.cz>  2017-06-22 16:37:55 +0200
commit    414ea9fcc8bd854a7201242e86e2b1c733cbc49e (patch)
tree      f400fe24000021c7244bfd3354b917e410133c68
parent    fa1fcfd11b039bc91a7a4bba4d42e1b4d53262a7 (diff)
parent    95bb14bf448753117d5622a5fc5ec3e9ee6cbdf7 (diff)

Merge remote-tracking branch 'origin/users/mhocko/SLE11-SP4/for-next_EMBARGO' into SLE11-SP4_EMBARGO (tag: rpm-3.0.101-107)
-rw-r--r--  patches.fixes/0001-mm-mmap-do-not-blow-on-PROT_NONE-MAP_FIXED-holes-in-.patch  69
-rw-r--r--  patches.fixes/0002-mm-do-not-collapse-stack-gap-into-THP-fix.patch  40
-rw-r--r--  patches.fixes/mm-thp-fix-collapsing-of-hugepages-on-madvise.patch  14
-rw-r--r--  patches.suse/mm-thp-allocate-transparent-hugepages-on-local-node.patch  14
-rw-r--r--  series.conf  5
5 files changed, 128 insertions, 14 deletions
diff --git a/patches.fixes/0001-mm-mmap-do-not-blow-on-PROT_NONE-MAP_FIXED-holes-in-.patch b/patches.fixes/0001-mm-mmap-do-not-blow-on-PROT_NONE-MAP_FIXED-holes-in-.patch
new file mode 100644
index 0000000000..9cbd966add
--- /dev/null
+++ b/patches.fixes/0001-mm-mmap-do-not-blow-on-PROT_NONE-MAP_FIXED-holes-in-.patch
@@ -0,0 +1,69 @@
+From 5a3d0bf0a4660bad63455830225c212ef6869392 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.com>
+Date: Thu, 22 Jun 2017 13:27:30 +0200
+Subject: [PATCH] mm, mmap: do not blow on PROT_NONE MAP_FIXED holes in the
+ stack
+Patch-mainline: no, fixed differently
+References: bnc#1039348, bnc#1045340, bnc#1045406
+
+"mm: enlarge stack guard gap" has introduced a regression in some JVM
+environments which are trying to implement their own stack guard page.
+hey are punching a new MAP_FIXED mapping inside the existing stack
+Vma:
+(gdb) bt
+ #0 0xf7fd9f89 in __kernel_vsyscall ()
+ #1 0xf7454508 in mmap () from /lib/libc.so.6
+ #2 0xf7a51cdb in os::commit_memory(char*, unsigned int, bool) () from /opt/novell/groupwise/client/java/lib/i386/client/libjvm.so
+ #3 0xf7afd1be in JavaThread::create_stack_guard_pages() () from /opt/novell/groupwise/client/java/lib/i386/client/libjvm.so
+ #4 0xf7afedd3 in Threads::create_vm(JavaVMInitArgs*, bool*) () from /opt/novell/groupwise/client/java/lib/i386/client/libjvm.so
+ #5 0xf7960566 in JNI_CreateJavaVM () from /opt/novell/groupwise/client/java/lib/i386/client/libjvm.so
+ #6 0x08048dc3 in LaunchJavaVm (argc=0, argv=0xffffd528, szPathToClientDir=0xffffd068 "/opt/novell/groupwise/client") at grpwisex.cpp:380
+ #7 0x080497a3 in main (argc=1, argv=0xffffd524) at grpwisex.cpp:473
+
+This will confuse expandable_stack_area into thinking that the stack
+expansion would in fact get us too close to an existing non-stack vma,
+which is correct behavior wrt. safety. It is a real regression on
+the other hand. Let's work around the problem by considering a PROT_NONE
+mapping as a part of the stack. This is a gross hack but overflowing to
+such a mapping would trap anyway and we can only hope that userspace
+knows what it is doing and handles it properly.
+
+Debugged-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+
+---
+ mm/mmap.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 620e1e4e3296..1e722532f6c1 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2378,7 +2378,8 @@ unsigned long expandable_stack_area(struct vm_area_struct *vma,
+ if (!next)
+ goto out;
+
+- if (next->vm_flags & VM_GROWSUP) {
++ /* see comment in !CONFIG_STACK_GROWSUP */
++ if ((next->vm_flags & VM_GROWSUP) || !(next->vm_flags & (VM_WRITE|VM_READ))) {
+ guard_gap = min(guard_gap, next->vm_start - address);
+ goto out;
+ }
+@@ -2457,8 +2458,13 @@ unsigned long expandable_stack_area(struct vm_area_struct *vma,
+ * That's only ok if it's the same stack mapping
+ * that has gotten split or there is sufficient gap
+ * between mappings
++ *
++ * Please note that some application (e.g. Java) punches
++ * MAP_FIXED inside the stack and then PROT_NONE it
++ * to mimic a stack guard which will clash with our protection
++ * so pretend tha PROT_NONE vmas are OK
+ */
+- if (prev->vm_flags & VM_GROWSDOWN) {
++ if ((prev->vm_flags & VM_GROWSDOWN) || !(prev->vm_flags & (VM_WRITE|VM_READ))) {
+ guard_gap = min(guard_gap, address - prev->vm_end);
+ goto out;
+ }
+--
+2.11.0
+
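For reference, the userspace pattern the patch has to tolerate looks roughly like the following; this is a minimal sketch, not taken from any JVM — the page size of 4096 and the 64-page offset below the current frame are illustrative assumptions (a real runtime derives the guard address from its thread stack bounds):

/* Sketch: punch a PROT_NONE MAP_FIXED "guard page" inside the stack,
 * the way a JVM-style runtime does. Illustrative only. */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	char probe;
	/* pick an address some pages below the current stack frame */
	void *target = (void *)(((unsigned long)&probe - 64 * 4096) & ~4095UL);
	void *guard = mmap(target, 4096, PROT_NONE,
			   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (guard == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* the stack vma is now split around a non-readable, non-writable
	 * hole; overflowing into it traps with SIGSEGV */
	printf("guard page at %p\n", guard);
	return 0;
}

Such a hole has neither VM_READ nor VM_WRITE set, which is exactly what the !(vm_flags & (VM_WRITE|VM_READ)) tests added above treat as part of the stack.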
diff --git a/patches.fixes/0002-mm-do-not-collapse-stack-gap-into-THP-fix.patch b/patches.fixes/0002-mm-do-not-collapse-stack-gap-into-THP-fix.patch
new file mode 100644
index 0000000000..b4d1d8e37d
--- /dev/null
+++ b/patches.fixes/0002-mm-do-not-collapse-stack-gap-into-THP-fix.patch
@@ -0,0 +1,40 @@
+From: Michal Hocko <mhocko@suse.com>
+Subject: [PATCH] mm: do not collapse stack gap into THP
+Patch-mainline: no, fixup for patches.fixes/0002-mm-do-not-collapse-stack-gap-into-THP.patch
+References: bnc#1039348, CVE-2017-1000364
+
+Willy Tarreau noticed that my backport
+(patches.fixes/0002-mm-do-not-collapse-stack-gap-into-THP.patch) does not
+account THP_FAULT_FALLBACK properly. This is not a disaster because the
+counter is only exported to userspace and nothing really crucial depends on
+it. It is good to fix though. We can also move the stack_guard_area check up
+because there is no real need to even try anon_vma_prepare or
+khugepaged_enter on the gap area.
+
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+
+---
+ mm/huge_memory.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -679,13 +679,15 @@ int do_huge_pmd_anonymous_page(struct mm
+ pte_t *pte;
+
+ if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
++ if (stack_guard_area(vma, haddr) ||
++ stack_guard_area(vma, haddr + HPAGE_PMD_SIZE)) {
++ count_vm_event(THP_FAULT_FALLBACK);
++ goto out;
++ }
+ if (unlikely(anon_vma_prepare(vma)))
+ return VM_FAULT_OOM;
+ if (unlikely(khugepaged_enter(vma)))
+ return VM_FAULT_OOM;
+- if (stack_guard_area(vma, haddr) ||
+- stack_guard_area(vma, haddr + HPAGE_PMD_SIZE))
+- goto out;
+ page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+ vma, haddr, numa_node_id(), 0);
+ if (unlikely(!page)) {
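The event the fixup now accounts for is visible to userspace as the thp_fault_fallback line in /proc/vmstat. A minimal sketch for watching it follows; the reader program is illustrative and not part of the patch:

/* Sketch: print the thp_fault_fallback counter from /proc/vmstat. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "thp_fault_fallback", 18) == 0)
			fputs(line, stdout);
	fclose(f);
	return 0;
}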
diff --git a/patches.fixes/mm-thp-fix-collapsing-of-hugepages-on-madvise.patch b/patches.fixes/mm-thp-fix-collapsing-of-hugepages-on-madvise.patch
index 532fb7ab9c..1f63733ec4 100644
--- a/patches.fixes/mm-thp-fix-collapsing-of-hugepages-on-madvise.patch
+++ b/patches.fixes/mm-thp-fix-collapsing-of-hugepages-on-madvise.patch
@@ -86,16 +86,16 @@ Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
}
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
-@@ -673,7 +673,7 @@ int do_huge_pmd_anonymous_page(struct mm
- if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
+@@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm
+ }
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
- if (unlikely(khugepaged_enter(vma)))
+ if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
return VM_FAULT_OOM;
- if (stack_guard_area(vma, haddr) ||
- stack_guard_area(vma, haddr + HPAGE_PMD_SIZE))
-@@ -1491,7 +1491,7 @@ int hugepage_madvise(struct vm_area_stru
+ gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma),
+ 0);
+@@ -1493,7 +1493,7 @@ int hugepage_madvise(struct vm_area_stru
* register it here without waiting a page fault that
* may not happen any time soon.
*/
@@ -104,7 +104,7 @@ Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
return -ENOMEM;
break;
case MADV_NOHUGEPAGE:
-@@ -1623,7 +1623,8 @@ int __khugepaged_enter(struct mm_struct
+@@ -1625,7 +1625,8 @@ int __khugepaged_enter(struct mm_struct
return 0;
}
@@ -114,7 +114,7 @@ Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
{
unsigned long hstart, hend;
if (!vma->anon_vma)
-@@ -1639,11 +1640,11 @@ int khugepaged_enter_vma_merge(struct vm
+@@ -1641,11 +1642,11 @@ int khugepaged_enter_vma_merge(struct vm
 * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
 * true too, verify it here.
*/
diff --git a/patches.suse/mm-thp-allocate-transparent-hugepages-on-local-node.patch b/patches.suse/mm-thp-allocate-transparent-hugepages-on-local-node.patch
index 8676d3c51d..9edb82efd9 100644
--- a/patches.suse/mm-thp-allocate-transparent-hugepages-on-local-node.patch
+++ b/patches.suse/mm-thp-allocate-transparent-hugepages-on-local-node.patch
@@ -78,10 +78,10 @@ Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
struct page *page;
unsigned long haddr = address & HPAGE_PMD_MASK;
pte_t *pte;
-@@ -686,8 +678,9 @@ int do_huge_pmd_anonymous_page(struct mm
- if (stack_guard_area(vma, haddr) ||
- stack_guard_area(vma, haddr + HPAGE_PMD_SIZE))
- goto out;
+@@ -688,8 +680,9 @@ int do_huge_pmd_anonymous_page(struct mm
+ return VM_FAULT_OOM;
+ if (unlikely(khugepaged_enter(vma)))
+ return VM_FAULT_OOM;
- page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
- vma, haddr, numa_node_id(), 0);
+ gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma),
@@ -90,7 +90,7 @@ Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
if (unlikely(!page)) {
count_vm_event(THP_FAULT_FALLBACK);
goto out;
-@@ -914,10 +907,12 @@ int do_huge_pmd_wp_page(struct mm_struct
+@@ -916,10 +909,12 @@ int do_huge_pmd_wp_page(struct mm_struct
spin_unlock(&mm->page_table_lock);
if (transparent_hugepage_enabled(vma) &&
@@ -107,7 +107,7 @@ Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
new_page = NULL;
if (unlikely(!new_page)) {
-@@ -1833,6 +1828,7 @@ static void collapse_huge_page(struct mm
+@@ -1835,6 +1830,7 @@ static void collapse_huge_page(struct mm
spinlock_t *ptl;
int isolated;
unsigned long hstart, hend;
@@ -115,7 +115,7 @@ Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#ifndef CONFIG_NUMA
-@@ -1851,8 +1847,8 @@ static void collapse_huge_page(struct mm
+@@ -1853,8 +1849,8 @@ static void collapse_huge_page(struct mm
* mmap_sem in read mode is good idea also to allow greater
* scalability.
*/
diff --git a/series.conf b/series.conf
index 93cc97de73..f3f81c0866 100644
--- a/series.conf
+++ b/series.conf
@@ -1865,9 +1865,14 @@
patches.fixes/mm-gup-close-FOLL-MAP_PRIVATE-race.patch
patches.fixes/mm-mempolicy.c-fix-error-handling-in-set_mempolicy-a.patch
+ # Fix for CVE-2017-1000364 - note that this has been fixed differently
+ # upstream (1be7107fbe18). The backport would be rather non-trivial so we
+ # stayed with the original proposal
patches.fixes/0001-mm-enlarge-stack-guard-gap.patch
patches.fixes/0001-mm-enlarge-stack-guard-gap.patch-fix
patches.fixes/0002-mm-do-not-collapse-stack-gap-into-THP.patch
+ patches.fixes/0002-mm-do-not-collapse-stack-gap-into-THP-fix.patch
+ patches.fixes/0001-mm-mmap-do-not-blow-on-PROT_NONE-MAP_FIXED-holes-in-.patch
# Please put all VM/FS patches which are not tmpfs/shmem related
# above