Home Home > GIT Browse > SLE12-SP3-AZURE
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKernel Build Daemon <kbuild@suse.de>2019-06-15 07:00:17 +0200
committerKernel Build Daemon <kbuild@suse.de>2019-06-15 07:00:17 +0200
commit6920da291fbd12244fa0c7a61728e094812304cd (patch)
treee01be2ff11f6e050c3c0690a8882697d95eec1a1
parentf3c710e01b85ed7829809c1c04d40799c505349c (diff)
parentf85e657112d159b1050a6e6269a1f628d66556ef (diff)
Merge branch 'SLE12-SP3' into SLE12-SP3-AZURESLE12-SP3-AZURE
-rw-r--r--patches.drivers/scsi-mpt3sas_ctl-fix-double-fetch-bug-in-ctl_ioctl_main45
-rw-r--r--patches.fixes/fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch163
-rw-r--r--patches.fixes/mm-add-try_get_page-helper-function.patch66
-rw-r--r--patches.fixes/mm-gup-ensure-real-head-page-is-ref-counted-when-using-hugepages.patch101
-rw-r--r--patches.fixes/mm-gup-remove-broken-vm_bug_on_page-compound-check-for-hugepages.patch67
-rw-r--r--patches.fixes/mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch79
-rw-r--r--patches.fixes/mm-prevent-get_user_pages-from-overflowing-page-refcount.patch285
-rw-r--r--patches.fixes/pipe-add-pipe_buf_get-helper.patch67
-rw-r--r--patches.fixes/signals-avoid-random-wakeups-in-sigsuspend.patch45
-rw-r--r--patches.kabi/fs-prevent-page-refcount-overflow-in-pipe_buf_get-kabi.patch58
-rw-r--r--patches.kernel.org/4.4.177-067-net-nfc-Fix-NULL-dereference-on-nfc_llcp_buil.patch2
-rw-r--r--patches.kernel.org/4.4.177-137-mdio_bus-Fix-use-after-free-on-device_registe.patch2
-rw-r--r--series.conf13
13 files changed, 991 insertions, 2 deletions
diff --git a/patches.drivers/scsi-mpt3sas_ctl-fix-double-fetch-bug-in-ctl_ioctl_main b/patches.drivers/scsi-mpt3sas_ctl-fix-double-fetch-bug-in-ctl_ioctl_main
new file mode 100644
index 0000000000..3bb9ba6856
--- /dev/null
+++ b/patches.drivers/scsi-mpt3sas_ctl-fix-double-fetch-bug-in-ctl_ioctl_main
@@ -0,0 +1,45 @@
+From: Gen Zhang <blackgod016574@gmail.com>
+Date: Thu, 30 May 2019 09:10:30 +0800
+Subject: scsi: mpt3sas_ctl: fix double-fetch bug in _ctl_ioctl_main()
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
+Git-commit: 86e5aca7fa2927060839f3e3b40c8bd65a7e8d1e
+Patch-mainline: Queued in subsystem maintainer repo
+References: bsc#1136922 CVE-2019-12456
+
+In _ctl_ioctl_main(), 'ioctl_header' is fetched the first time from
+userspace. 'ioctl_header.ioc_number' is then checked. The legal result is
+saved to 'ioc'. Then, in condition MPT3COMMAND, the whole struct is fetched
+again from the userspace. Then _ctl_do_mpt_command() is called, 'ioc' and
+'karg' as inputs.
+
+However, a malicious user can change the 'ioc_number' between the two
+fetches, which can cause a potential security issue. Moreover, a
+malicious user can provide a valid 'ioc_number' to pass the check in first
+fetch, and then modify it in the second fetch.
+
+To fix this, we need to recheck the 'ioc_number' in the second fetch.
+
+Signed-off-by: Gen Zhang <blackgod016574@gmail.com>
+Acked-by: Suganath Prabu S <suganath-prabu.subramani@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Acked-by: Lee Duncan <lduncan@suse.com>
+---
+ drivers/scsi/mpt3sas/mpt3sas_ctl.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+index b2bb47c14d35..5181c03e82a6 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+@@ -2319,6 +2319,10 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
+ break;
+ }
+
++ if (karg.hdr.ioc_number != ioctl_header.ioc_number) {
++ ret = -EINVAL;
++ break;
++ }
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
+ uarg = arg;
+ ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+
diff --git a/patches.fixes/fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch b/patches.fixes/fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch
new file mode 100644
index 0000000000..77ebb2064b
--- /dev/null
+++ b/patches.fixes/fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch
@@ -0,0 +1,163 @@
+From: Matthew Wilcox <willy@infradead.org>
+Date: Fri, 5 Apr 2019 14:02:10 -0700
+Subject: fs: prevent page refcount overflow in pipe_buf_get
+Git-commit: 15fab63e1e57be9fdb5eec1bbc5916e9825e9acb
+Patch-mainline: v5.1-rc5
+References: CVE-2019-11487, bsc#1133190
+
+Change pipe_buf_get() to return a bool indicating whether it succeeded
+in raising the refcount of the page (if the thing in the pipe is a page).
+This removes another mechanism for overflowing the page refcount. All
+callers converted to handle a failure.
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Matthew Wilcox <willy@infradead.org>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ fs/fuse/dev.c | 12 ++++++------
+ fs/pipe.c | 4 ++--
+ fs/splice.c | 12 ++++++++++--
+ include/linux/pipe_fs_i.h | 10 ++++++----
+ kernel/trace/trace.c | 6 +++++-
+ 5 files changed, 29 insertions(+), 15 deletions(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -2035,10 +2035,8 @@ static ssize_t fuse_dev_splice_write(str
+ rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
+
+ ret = -EINVAL;
+- if (rem < len) {
+- pipe_unlock(pipe);
+- goto out;
+- }
++ if (rem < len)
++ goto out_free;
+
+ rem = len;
+ while (rem) {
+@@ -2056,7 +2054,9 @@ static ssize_t fuse_dev_splice_write(str
+ pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
+ pipe->nrbufs--;
+ } else {
+- pipe_buf_get(pipe, ibuf);
++ if (!pipe_buf_get(pipe, ibuf))
++ goto out_free;
++
+ *obuf = *ibuf;
+ obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+ obuf->len = rem;
+@@ -2079,13 +2079,13 @@ static ssize_t fuse_dev_splice_write(str
+ ret = fuse_dev_do_write(fud, &cs, len);
+
+ pipe_lock(pipe);
++out_free:
+ for (idx = 0; idx < nbuf; idx++) {
+ struct pipe_buffer *buf = &bufs[idx];
+ buf->ops->release(pipe, buf);
+ }
+ pipe_unlock(pipe);
+
+-out:
+ kfree(bufs);
+ return ret;
+ }
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -178,9 +178,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
+ * in the tee() system call, when we duplicate the buffers in one
+ * pipe into another.
+ */
+-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
++bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+ {
+- page_cache_get(buf->page);
++ return try_get_page(buf->page);
+ }
+ EXPORT_SYMBOL(generic_pipe_buf_get);
+
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1877,7 +1877,11 @@ retry:
+ * Get a reference to this pipe buffer,
+ * so we can copy the contents over.
+ */
+- pipe_buf_get(ipipe, ibuf);
++ if (!pipe_buf_get(ipipe, ibuf)) {
++ if (ret == 0)
++ ret = -EFAULT;
++ break;
++ }
+ *obuf = *ibuf;
+
+ /*
+@@ -1949,7 +1953,11 @@ static int link_pipe(struct pipe_inode_i
+ * Get a reference to this pipe buffer,
+ * so we can copy the contents over.
+ */
+- pipe_buf_get(ipipe, ibuf);
++ if (!pipe_buf_get(ipipe, ibuf)) {
++ if (ret == 0)
++ ret = -EFAULT;
++ break;
++ }
+
+ obuf = opipe->bufs + nbuf;
+ *obuf = *ibuf;
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -112,18 +112,20 @@ struct pipe_buf_operations {
+ /*
+ * Get a reference to the pipe buffer.
+ */
+- void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
++ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ };
+
+ /**
+ * pipe_buf_get - get a reference to a pipe_buffer
+ * @pipe: the pipe that the buffer belongs to
+ * @buf: the buffer to get a reference to
++ *
++ * Return: %true if the reference was successfully obtained.
+ */
+-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
++static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+ {
+- buf->ops->get(pipe, buf);
++ return buf->ops->get(pipe, buf);
+ }
+
+ /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
+@@ -148,7 +150,7 @@ struct pipe_inode_info *alloc_pipe_info(
+ void free_pipe_info(struct pipe_inode_info *);
+
+ /* Generic pipe buffer ops functions */
+-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
++bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5731,12 +5731,16 @@ static void buffer_pipe_buf_release(stru
+ buf->private = 0;
+ }
+
+-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
++static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+ {
+ struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
++ if (ref->ref > INT_MAX/2)
++ return false;
++
+ ref->ref++;
++ return true;
+ }
+
+ /* Pipe buffer operations for a buffer. */
diff --git a/patches.fixes/mm-add-try_get_page-helper-function.patch b/patches.fixes/mm-add-try_get_page-helper-function.patch
new file mode 100644
index 0000000000..7ed0d87755
--- /dev/null
+++ b/patches.fixes/mm-add-try_get_page-helper-function.patch
@@ -0,0 +1,66 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 11 Apr 2019 10:14:59 -0700
+Subject: mm: add 'try_get_page()' helper function
+Git-commit: 88b1a17dfc3ed7728316478fae0f5ad508f50397
+Patch-mainline: v5.1-rc5
+References: CVE-2019-11487, bsc#1133190
+
+[ 4.4 backport: get_page() is more complicated due to special handling
+ of tail pages via __get_page_tail(). But in all cases, eventually the
+ compound head page's refcount is incremented. So try_get_page() just
+ checks compound head's refcount for overflow and then simply calls
+ get_page(). ]
+
+This is the same as the traditional 'get_page()' function, but instead
+of unconditionally incrementing the reference count of the page, it only
+does so if the count was "safe". It returns whether the reference count
+was incremented (and is marked __must_check, since the caller obviously
+has to be aware of it).
+
+Also like 'get_page()', you can't use this function unless you already
+had a reference to the page. The intent is that you can use this
+exactly like get_page(), but in situations where you want to limit the
+maximum reference count.
+
+The code currently does an unconditional WARN_ON_ONCE() if we ever hit
+the reference count issues (either zero or negative), as a notification
+that the conditional non-increment actually happened.
+
+NOTE! The count access for the "safety" check is inherently racy, but
+that doesn't matter since the buffer we use is basically half the range
+of the reference count (ie we look at the sign of the count).
+
+Acked-by: Matthew Wilcox <willy@infradead.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ include/linux/mm.h | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -743,6 +743,22 @@ static inline void get_page(struct page
+ get_zone_device_page(page);
+ }
+
++static inline __must_check bool try_get_page(struct page *page)
++{
++ struct page *head = compound_head(page);
++
++ /*
++ * get_page() always increases the head page's refcount, either directly
++ * or via __get_page_tail() for a tail page, so we check that
++ */
++ if (WARN_ON_ONCE(page_ref_count(head) <= 0))
++ return false;
++
++ get_page(page);
++ return true;
++}
++
++
+ #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+ #define SECTION_IN_PAGE_FLAGS
+ #endif
diff --git a/patches.fixes/mm-gup-ensure-real-head-page-is-ref-counted-when-using-hugepages.patch b/patches.fixes/mm-gup-ensure-real-head-page-is-ref-counted-when-using-hugepages.patch
new file mode 100644
index 0000000000..5708668b9f
--- /dev/null
+++ b/patches.fixes/mm-gup-ensure-real-head-page-is-ref-counted-when-using-hugepages.patch
@@ -0,0 +1,101 @@
+From: Punit Agrawal <punit.agrawal@arm.com>
+Date: Thu, 6 Jul 2017 15:39:39 -0700
+Subject: mm, gup: ensure real head page is ref-counted when using hugepages
+Git-commit: d63206ee32b6e64b0e12d46e5d6004afd9913713
+Patch-mainline: v4.13-rc1
+References: CVE-2019-11487, bsc#1133190, prerequisite
+
+When speculatively taking references to a hugepage using
+page_cache_add_speculative() in gup_huge_pmd(), it is assumed that the
+page returned by pmd_page() is the head page. Although normally true,
+this assumption doesn't hold when the hugepage comprises of successive
+page table entries such as when using contiguous bit on arm64 at PTE or
+PMD levels.
+
+This can be addressed by ensuring that the page passed to
+page_cache_add_speculative() is the real head or by de-referencing the
+head page within the function.
+
+We take the first approach to keep the usage pattern aligned with
+page_cache_get_speculative() where users already pass the appropriate
+page, i.e., the de-referenced head.
+
+Apply the same logic to fix gup_huge_[pud|pgd]() as well.
+
+[punit.agrawal@arm.com: fix arm64 ltp failure]
+ Link: http://lkml.kernel.org/r/20170619170145.25577-5-punit.agrawal@arm.com
+Link: http://lkml.kernel.org/r/20170522133604.11392-3-punit.agrawal@arm.com
+Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
+Acked-by: Steve Capper <steve.capper@arm.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ mm/gup.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1157,8 +1157,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_
+ return 0;
+
+ refs = 0;
+- head = pmd_page(orig);
+- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
++ page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ tail = page;
+ do {
+ pages[*nr] = page;
+@@ -1167,6 +1166,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
++ head = compound_head(pmd_page(orig));
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
+@@ -1203,8 +1203,7 @@ static int gup_huge_pud(pud_t orig, pud_
+ return 0;
+
+ refs = 0;
+- head = pud_page(orig);
+- page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
++ page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ tail = page;
+ do {
+ pages[*nr] = page;
+@@ -1213,6 +1212,7 @@ static int gup_huge_pud(pud_t orig, pud_
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
++ head = compound_head(pud_page(orig));
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
+@@ -1245,8 +1245,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_
+ return 0;
+
+ refs = 0;
+- head = pgd_page(orig);
+- page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
++ page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
+ tail = page;
+ do {
+ pages[*nr] = page;
+@@ -1255,6 +1254,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
++ head = compound_head(pgd_page(orig));
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
diff --git a/patches.fixes/mm-gup-remove-broken-vm_bug_on_page-compound-check-for-hugepages.patch b/patches.fixes/mm-gup-remove-broken-vm_bug_on_page-compound-check-for-hugepages.patch
new file mode 100644
index 0000000000..279b1f372a
--- /dev/null
+++ b/patches.fixes/mm-gup-remove-broken-vm_bug_on_page-compound-check-for-hugepages.patch
@@ -0,0 +1,67 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 6 Jul 2017 15:39:36 -0700
+Subject: mm, gup: remove broken VM_BUG_ON_PAGE compound check for hugepages
+Git-commit: a3e328556d41bb61c55f9dfcc62d6a826ea97b85
+Patch-mainline: v4.13-rc1
+References: CVE-2019-11487, bsc#1133190, prerequisite
+
+When operating on hugepages with DEBUG_VM enabled, the GUP code checks
+the compound head for each tail page prior to calling
+page_cache_add_speculative. This is broken, because on the fast-GUP
+path (where we don't hold any page table locks) we can be racing with a
+concurrent invocation of split_huge_page_to_list.
+
+split_huge_page_to_list deals with this race by using page_ref_freeze to
+freeze the page and force concurrent GUPs to fail whilst the component
+pages are modified. This modification includes clearing the
+compound_head field for the tail pages, so checking this prior to a
+successful call to page_cache_add_speculative can lead to false
+positives: In fact, page_cache_add_speculative *already* has this check
+once the page refcount has been successfully updated, so we can simply
+remove the broken calls to VM_BUG_ON_PAGE.
+
+Link: http://lkml.kernel.org/r/20170522133604.11392-2-punit.agrawal@arm.com
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
+Acked-by: Steve Capper <steve.capper@arm.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ mm/gup.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1161,7 +1161,6 @@ static int gup_huge_pmd(pmd_t orig, pmd_
+ page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ tail = page;
+ do {
+- VM_BUG_ON_PAGE(compound_head(page) != head, page);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+@@ -1208,7 +1207,6 @@ static int gup_huge_pud(pud_t orig, pud_
+ page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ tail = page;
+ do {
+- VM_BUG_ON_PAGE(compound_head(page) != head, page);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+@@ -1251,7 +1249,6 @@ static int gup_huge_pgd(pgd_t orig, pgd_
+ page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
+ tail = page;
+ do {
+- VM_BUG_ON_PAGE(compound_head(page) != head, page);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
diff --git a/patches.fixes/mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch b/patches.fixes/mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch
new file mode 100644
index 0000000000..6eb4e85b46
--- /dev/null
+++ b/patches.fixes/mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch
@@ -0,0 +1,79 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 11 Apr 2019 10:06:20 -0700
+Subject: mm: make page ref count overflow check tighter and more explicit
+Git-commit: f958d7b528b1b40c44cfda5eabe2d82760d868c3
+Patch-mainline: v5.1-rc5
+References: CVE-2019-11487, bsc#1133190
+
+[ 4.4 backport: page_ref_count() doesn't exist, introduce it to reduce churn.
+ Change also two similar checks in mm/internal.h ]
+
+We have a VM_BUG_ON() to check that the page reference count doesn't
+underflow (or get close to overflow) by checking the sign of the count.
+
+That's all fine, but we actually want to allow people to use a "get page
+ref unless it's already very high" helper function, and we want that one
+to use the sign of the page ref (without triggering this VM_BUG_ON).
+
+Change the VM_BUG_ON to only check for small underflows (or _very_ close
+to overflowing), and ignore overflows which have strayed into negative
+territory.
+
+Acked-by: Matthew Wilcox <willy@infradead.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ include/linux/mm.h | 11 ++++++++++-
+ mm/internal.h | 5 +++--
+ 2 files changed, 13 insertions(+), 3 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -715,6 +715,15 @@ static inline bool is_zone_device_page(c
+ }
+ #endif
+
++static inline int page_ref_count(struct page *page)
++{
++ return atomic_read(&page->_count);
++}
++
++/* 127: arbitrary random number, small enough to assemble well */
++#define page_ref_zero_or_close_to_overflow(page) \
++ ((unsigned int) page_ref_count(page) + 127u <= 127u)
++
+ static inline void get_page(struct page *page)
+ {
+ if (unlikely(PageTail(page)))
+@@ -727,7 +736,7 @@ static inline void get_page(struct page
+ * Getting a normal page or the head of a compound page
+ * requires to already have an elevated page->_count.
+ */
+- VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
++ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
+ atomic_inc(&page->_count);
+
+ if (unlikely(is_zone_device_page(page)))
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -82,7 +82,8 @@ static inline void __get_page_tail_foll(
+ * speculative page access (like in
+ * page_cache_get_speculative()) on tail pages.
+ */
+- VM_BUG_ON_PAGE(atomic_read(&compound_head(page)->_count) <= 0, page);
++ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(compound_head(page)),
++ page);
+ if (get_page_head)
+ atomic_inc(&compound_head(page)->_count);
+ get_huge_page_tail(page);
+@@ -107,7 +108,7 @@ static inline void get_page_foll(struct
+ * Getting a normal page or the head of a compound page
+ * requires to already have an elevated page->_count.
+ */
+- VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
++ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
+ atomic_inc(&page->_count);
+ }
+ }
diff --git a/patches.fixes/mm-prevent-get_user_pages-from-overflowing-page-refcount.patch b/patches.fixes/mm-prevent-get_user_pages-from-overflowing-page-refcount.patch
new file mode 100644
index 0000000000..c3260763d9
--- /dev/null
+++ b/patches.fixes/mm-prevent-get_user_pages-from-overflowing-page-refcount.patch
@@ -0,0 +1,285 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Thu, 11 Apr 2019 10:49:19 -0700
+Subject: mm: prevent get_user_pages() from overflowing page refcount
+Git-commit: 8fde12ca79aff9b5ba951fce1a2641901b8d8e64
+Patch-mainline: v5.1-rc5
+References: CVE-2019-11487, bsc#1133190
+
+[ 4.4 backport: there's get_page_foll(), so add try_get_page()-like checks
+ in there, enabled by a new parameter, which is false where
+ upstream patch doesn't replace get_page() with try_get_page()
+ (the THP and hugetlb callers).
+ In gup_pte_range(), we don't expect tail pages, so just check
+ page ref count instead of try_get_compound_head()
+ Also patch arch-specific variants of gup.c for x86 and s390,
+ ignore those we don't ship ]
+
+If the page refcount wraps around past zero, it will be freed while
+there are still four billion references to it. One of the possible
+avenues for an attacker to try to make this happen is by doing direct IO
+on a page multiple times. This patch makes get_user_pages() refuse to
+take a new page reference if there are already more than two billion
+references to the page.
+
+Reported-by: Jann Horn <jannh@google.com>
+Acked-by: Matthew Wilcox <willy@infradead.org>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ arch/s390/mm/gup.c | 6 ++++--
+ arch/x86/mm/gup.c | 15 +++++++++++++--
+ mm/gup.c | 40 ++++++++++++++++++++++++++++++++--------
+ mm/huge_memory.c | 2 +-
+ mm/hugetlb.c | 18 ++++++++++++++++--
+ mm/internal.h | 12 +++++++++---
+ 6 files changed, 75 insertions(+), 18 deletions(-)
+
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -37,7 +37,8 @@ static inline int gup_pte_range(pmd_t *p
+ return 0;
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
+- if (!page_cache_get_speculative(page))
++ if (unlikely(WARN_ON_ONCE(page_ref_count(page) < 0)
++ || !page_cache_get_speculative(page)))
+ return 0;
+ if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ put_page(page);
+@@ -76,7 +77,8 @@ static inline int gup_huge_pmd(pmd_t *pm
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+- if (!page_cache_add_speculative(head, refs)) {
++ if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
++ || !page_cache_add_speculative(head, refs))) {
+ *nr -= refs;
+ return 0;
+ }
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -133,7 +133,11 @@ static noinline int gup_pte_range(pmd_t
+ }
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
+- get_page(page);
++ if (unlikely(!try_get_page(page))) {
++ put_dev_pagemap(pgmap);
++ pte_unmap(ptep);
++ return 0;
++ }
+ put_dev_pagemap(pgmap);
+ SetPageReferenced(page);
+ pages[*nr] = page;
+@@ -171,7 +175,10 @@ static int __gup_device_huge_pmd(pmd_t p
+ }
+ SetPageReferenced(page);
+ pages[*nr] = page;
+- get_page(page);
++ if (unlikely(!try_get_page(page))) {
++ put_dev_pagemap(pgmap);
++ return 0;
++ }
+ put_dev_pagemap(pgmap);
+ (*nr)++;
+ pfn++;
+@@ -197,6 +204,8 @@ static noinline int gup_huge_pmd(pmd_t p
+
+ refs = 0;
+ head = pmd_page(pmd);
++ if (WARN_ON_ONCE(page_ref_count(head) <= 0))
++ return 0;
+ page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ do {
+ VM_BUG_ON_PAGE(compound_head(page) != head, page);
+@@ -269,6 +278,8 @@ static noinline int gup_huge_pud(pud_t p
+
+ refs = 0;
+ head = pud_page(pud);
++ if (WARN_ON_ONCE(page_ref_count(head) <= 0))
++ return 0;
+ page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ do {
+ VM_BUG_ON_PAGE(compound_head(page) != head, page);
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -139,7 +139,11 @@ retry:
+ }
+
+ if (flags & FOLL_GET) {
+- get_page_foll(page);
++ if (!get_page_foll(page, true)) {
++ page = ERR_PTR(-ENOMEM);
++ put_dev_pagemap(pgmap);
++ goto out;
++ }
+
+ /* drop the pgmap reference now that we hold the page */
+ if (pgmap) {
+@@ -315,7 +319,10 @@ static int get_gate_page(struct mm_struc
+ goto unmap;
+ *page = pte_page(*pte);
+ }
+- get_page(*page);
++ if (unlikely(!try_get_page(*page))) {
++ ret = -ENOMEM;
++ goto unmap;
++ }
+ out:
+ ret = 0;
+ unmap:
+@@ -1080,6 +1087,20 @@ struct page *get_dump_page(unsigned long
+ */
+ #ifdef CONFIG_HAVE_GENERIC_RCU_GUP
+
++/*
++ * Return the compound head page with ref appropriately incremented,
++ * or NULL if that failed.
++ */
++static inline struct page *try_get_compound_head(struct page *page, int refs)
++{
++ struct page *head = compound_head(page);
++ if (WARN_ON_ONCE(page_ref_count(head) < 0))
++ return NULL;
++ if (unlikely(!page_cache_add_speculative(head, refs)))
++ return NULL;
++ return head;
++}
++
+ #ifdef __HAVE_ARCH_PTE_SPECIAL
+ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+@@ -1110,6 +1131,9 @@ static int gup_pte_range(pmd_t pmd, unsi
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
+
++ if (WARN_ON_ONCE(page_ref_count(page) < 0))
++ goto pte_unmap;
++
+ if (!page_cache_get_speculative(page))
+ goto pte_unmap;
+
+@@ -1166,8 +1190,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+- head = compound_head(pmd_page(orig));
+- if (!page_cache_add_speculative(head, refs)) {
++ head = try_get_compound_head(pmd_page(orig), refs);
++ if (!head) {
+ *nr -= refs;
+ return 0;
+ }
+@@ -1212,8 +1236,8 @@ static int gup_huge_pud(pud_t orig, pud_
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+- head = compound_head(pud_page(orig));
+- if (!page_cache_add_speculative(head, refs)) {
++ head = try_get_compound_head(pud_page(orig), refs);
++ if (!head) {
+ *nr -= refs;
+ return 0;
+ }
+@@ -1254,8 +1278,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+- head = compound_head(pgd_page(orig));
+- if (!page_cache_add_speculative(head, refs)) {
++ head = try_get_compound_head(pgd_page(orig), refs);
++ if (!head) {
+ *nr -= refs;
+ return 0;
+ }
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1409,7 +1409,7 @@ struct page *follow_trans_huge_pmd(struc
+ page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
+ VM_BUG_ON_PAGE(!PageCompound(page), page);
+ if (flags & FOLL_GET)
+- get_page_foll(page);
++ get_page_foll(page, false);
+
+ out:
+ return page;
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3872,6 +3872,7 @@ long follow_hugetlb_page(struct mm_struc
+ unsigned long vaddr = *position;
+ unsigned long remainder = *nr_pages;
+ struct hstate *h = hstate_vma(vma);
++ int err = -EFAULT;
+
+ while (vaddr < vma->vm_end && remainder) {
+ pte_t *pte;
+@@ -3943,10 +3944,23 @@ long follow_hugetlb_page(struct mm_struc
+
+ pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
+ page = pte_page(huge_ptep_get(pte));
++
++ /*
++ * Instead of doing 'try_get_page()' below in the same_page
++ * loop, just check the count once here.
++ */
++ if (unlikely(page_count(page) <= 0)) {
++ if (pages) {
++ spin_unlock(ptl);
++ remainder = 0;
++ err = -ENOMEM;
++ break;
++ }
++ }
+ same_page:
+ if (pages) {
+ pages[i] = mem_map_offset(page, pfn_offset);
+- get_page_foll(pages[i]);
++ get_page_foll(pages[i], false);
+ }
+
+ if (vmas)
+@@ -3969,7 +3983,7 @@ same_page:
+ *nr_pages = remainder;
+ *position = vaddr;
+
+- return i ? i : -EFAULT;
++ return i ? i : err;
+ }
+
+ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -94,23 +94,29 @@ static inline void __get_page_tail_foll(
+ * follow_page() and it must be called while holding the proper PT
+ * lock while the pte (or pmd_trans_huge) is still mapping the page.
+ */
+-static inline void get_page_foll(struct page *page)
++static inline bool get_page_foll(struct page *page, bool check)
+ {
+- if (unlikely(PageTail(page)))
++ if (unlikely(PageTail(page))) {
+ /*
+ * This is safe only because
+ * __split_huge_page_refcount() can't run under
+ * get_page_foll() because we hold the proper PT lock.
+ */
++ if (check && WARN_ON_ONCE(
++ page_ref_count(compound_head(page)) <= 0))
++ return false;
+ __get_page_tail_foll(page, true);
+- else {
++ } else {
+ /*
+ * Getting a normal page or the head of a compound page
+ * requires to already have an elevated page->_count.
+ */
+ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
++ if (check && WARN_ON_ONCE(page_ref_count(page) <= 0))
++ return false;
+ atomic_inc(&page->_count);
+ }
++ return true;
+ }
+
+ extern unsigned long highest_memmap_pfn;
diff --git a/patches.fixes/pipe-add-pipe_buf_get-helper.patch b/patches.fixes/pipe-add-pipe_buf_get-helper.patch
new file mode 100644
index 0000000000..a53542764f
--- /dev/null
+++ b/patches.fixes/pipe-add-pipe_buf_get-helper.patch
@@ -0,0 +1,67 @@
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 27 Sep 2016 10:45:12 +0200
+Subject: pipe: add pipe_buf_get() helper
+Git-commit: 7bf2d1df80822ec056363627e2014990f068f7aa
+Patch-mainline: v4.9-rc1
+References: CVE-2019-11487, bsc#1133190, prerequisite
+
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ fs/fuse/dev.c | 2 +-
+ fs/splice.c | 4 ++--
+ include/linux/pipe_fs_i.h | 11 +++++++++++
+ 3 files changed, 14 insertions(+), 3 deletions(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -2023,7 +2023,7 @@ static ssize_t fuse_dev_splice_write(str
+ pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
+ pipe->nrbufs--;
+ } else {
+- ibuf->ops->get(pipe, ibuf);
++ pipe_buf_get(pipe, ibuf);
+ *obuf = *ibuf;
+ obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+ obuf->len = rem;
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1876,7 +1876,7 @@ retry:
+ * Get a reference to this pipe buffer,
+ * so we can copy the contents over.
+ */
+- ibuf->ops->get(ipipe, ibuf);
++ pipe_buf_get(ipipe, ibuf);
+ *obuf = *ibuf;
+
+ /*
+@@ -1948,7 +1948,7 @@ static int link_pipe(struct pipe_inode_i
+ * Get a reference to this pipe buffer,
+ * so we can copy the contents over.
+ */
+- ibuf->ops->get(ipipe, ibuf);
++ pipe_buf_get(ipipe, ibuf);
+
+ obuf = opipe->bufs + nbuf;
+ *obuf = *ibuf;
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -115,6 +115,17 @@ struct pipe_buf_operations {
+ void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ };
+
++/**
++ * pipe_buf_get - get a reference to a pipe_buffer
++ * @pipe: the pipe that the buffer belongs to
++ * @buf: the buffer to get a reference to
++ */
++static inline void pipe_buf_get(struct pipe_inode_info *pipe,
++ struct pipe_buffer *buf)
++{
++ buf->ops->get(pipe, buf);
++}
++
+ /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
+ memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
+ #define PIPE_SIZE PAGE_SIZE
diff --git a/patches.fixes/signals-avoid-random-wakeups-in-sigsuspend.patch b/patches.fixes/signals-avoid-random-wakeups-in-sigsuspend.patch
new file mode 100644
index 0000000000..32cc00706c
--- /dev/null
+++ b/patches.fixes/signals-avoid-random-wakeups-in-sigsuspend.patch
@@ -0,0 +1,45 @@
+From 906000bc26fed13e58c17439d331cdb38d9ac22f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sasha.levin@oracle.com>
+Date: Fri, 5 Feb 2016 15:36:05 -0800
+Subject: [PATCH] signals: avoid random wakeups in sigsuspend()
+Patch-mainline: v4.5-rc3
+Git-commit: 823dd3224a07f618d652a7743c9603222d019de3
+References: bsc#1137915
+
+A random wakeup can get us out of sigsuspend() without TIF_SIGPENDING
+being set.
+
+Avoid that by making sure we were signaled, like sys_pause() does.
+
+Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Oscar Salvador <osalvador@suse.de>
+---
+ kernel/signal.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 8bb6d08b9f64..654374f7db3e 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -3593,8 +3593,10 @@ static int sigsuspend(sigset_t *set)
+ current->saved_sigmask = current->blocked;
+ set_current_blocked(set);
+
+- __set_current_state(TASK_INTERRUPTIBLE);
+- schedule();
++ while (!signal_pending(current)) {
++ __set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ }
+ set_restore_sigmask();
+ return -ERESTARTNOHAND;
+ }
+--
+2.12.3
+
diff --git a/patches.kabi/fs-prevent-page-refcount-overflow-in-pipe_buf_get-kabi.patch b/patches.kabi/fs-prevent-page-refcount-overflow-in-pipe_buf_get-kabi.patch
new file mode 100644
index 0000000000..db55147a22
--- /dev/null
+++ b/patches.kabi/fs-prevent-page-refcount-overflow-in-pipe_buf_get-kabi.patch
@@ -0,0 +1,58 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Subject: fs: prevent page refcount overflow in pipe_buf_get - KABI fix
+References: CVE-2019-11487, bsc#1133190, KABI
+Patch-mainline: Never, kABI
+
+Hide return value change from void to bool, current callers can keep
+ignoring it.
+
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ fs/pipe.c | 7 ++++++-
+ include/linux/pipe_fs_i.h | 11 ++++++++++-
+ 2 files changed, 16 insertions(+), 2 deletions(-)
+
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -178,7 +178,12 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
+ * in the tee() system call, when we duplicate the buffers in one
+ * pipe into another.
+ */
+-bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
++#ifndef __GENKSYMS__
++bool
++#else
++void
++#endif
++generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+ {
+ return try_get_page(buf->page);
+ }
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -112,7 +112,11 @@ struct pipe_buf_operations {
+ /*
+ * Get a reference to the pipe buffer.
+ */
++#ifndef __GENKSYMS__
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
++#else
++ void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
++#endif
+ };
+
+ /**
+@@ -150,7 +154,12 @@ struct pipe_inode_info *alloc_pipe_info(
+ void free_pipe_info(struct pipe_inode_info *);
+
+ /* Generic pipe buffer ops functions */
+-bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
++#ifndef __GENKSYMS__
++bool
++#else
++void
++#endif
++generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
diff --git a/patches.kernel.org/4.4.177-067-net-nfc-Fix-NULL-dereference-on-nfc_llcp_buil.patch b/patches.kernel.org/4.4.177-067-net-nfc-Fix-NULL-dereference-on-nfc_llcp_buil.patch
index a80c7b1617..e319070c57 100644
--- a/patches.kernel.org/4.4.177-067-net-nfc-Fix-NULL-dereference-on-nfc_llcp_buil.patch
+++ b/patches.kernel.org/4.4.177-067-net-nfc-Fix-NULL-dereference-on-nfc_llcp_buil.patch
@@ -1,7 +1,7 @@
From: YueHaibing <yuehaibing@huawei.com>
Date: Fri, 22 Feb 2019 15:37:58 +0800
Subject: [PATCH] net: nfc: Fix NULL dereference on nfc_llcp_build_tlv fails
-References: bnc#1012382
+References: bnc#1012382, CVE-2019-12818, bsc#1138293
Patch-mainline: 4.4.177
Git-commit: 58bdd544e2933a21a51eecf17c3f5f94038261b5
diff --git a/patches.kernel.org/4.4.177-137-mdio_bus-Fix-use-after-free-on-device_registe.patch b/patches.kernel.org/4.4.177-137-mdio_bus-Fix-use-after-free-on-device_registe.patch
index 6b2eb961b5..e3992a7196 100644
--- a/patches.kernel.org/4.4.177-137-mdio_bus-Fix-use-after-free-on-device_registe.patch
+++ b/patches.kernel.org/4.4.177-137-mdio_bus-Fix-use-after-free-on-device_registe.patch
@@ -2,7 +2,7 @@ From: YueHaibing <yuehaibing@huawei.com>
Date: Thu, 21 Feb 2019 22:42:01 +0800
Subject: [PATCH] mdio_bus: Fix use-after-free on device_register fails
Patch-mainline: 4.4.177
-References: bnc#1012382 git-fixes
+References: bnc#1012382 git-fixes, CVE-2019-12819, bsc#1138291
Git-commit: 6ff7b060535e87c2ae14dd8548512abfdda528fb
[ Upstream commit 6ff7b060535e87c2ae14dd8548512abfdda528fb ]
diff --git a/series.conf b/series.conf
index 51048fbf27..addef30646 100644
--- a/series.conf
+++ b/series.conf
@@ -8698,6 +8698,7 @@
patches.suse/CFS-0021-crush-decode-and-initialize-chooseleaf_stable.patch
patches.suse/CFS-0022-libceph-advertise-support-for-TUNABLES5.patch
patches.suse/CFS-0023-libceph-MOSDOpReply-v7-encoding.patch
+ patches.fixes/signals-avoid-random-wakeups-in-sigsuspend.patch
patches.drivers/block-fix-pfn_mkwrite-dax-fault-handler.patch
patches.suse/mm-vmstat-make-quiet_vmstat-lighter.patch
patches.suse/vmstat-make-vmstat_update-deferrable.patch
@@ -16890,6 +16891,7 @@
patches.drivers/0187-nvme-scsi-Remove-power-management-support.patch
patches.drivers/0188-nvme-Pass-pointers-not-dma-addresses-to-nvme_get-set.patch
patches.drivers/0304-fs-block_dev.c-return-the-right-error-in-thaw_bdev.patch
+ patches.fixes/pipe-add-pipe_buf_get-helper.patch
patches.arch/powerpc-64-Do-load-of-PACAKBASE-in-LOAD_HANDLER.patch
patches.arch/powerpc-book3s-Add-a-cpu-table-entry-for-different-P.patch
patches.arch/powerpc-dlpar-012-pseries-fix-memory-leak-in-queue_hotplug_eve.patch
@@ -22679,6 +22681,8 @@
patches.suse/0008-mm-memory_hotplug-fix-the-section-mismatch-warning.patch
patches.suse/0009-mm-memory_hotplug-remove-unused-cruft-after-memory-h.patch
patches.fixes/mm-adaptive-hash-table-scaling.patch
+ patches.fixes/mm-gup-remove-broken-vm_bug_on_page-compound-check-for-hugepages.patch
+ patches.fixes/mm-gup-ensure-real-head-page-is-ref-counted-when-using-hugepages.patch
patches.drivers/0239-acpi-nfit-Add-support-of-NVDIMM-memory-error-notific.patch
patches.drivers/0240-acpi-nfit-Issue-Start-ARS-to-retrieve-existing-recor.patch
patches.arch/powerpc-fadump-Add-a-warning-when-fadump_reserve_mem.patch
@@ -25724,6 +25728,10 @@
patches.drivers/tpm-Fix-the-type-of-the-return-value-in-calc_tpm2_ev.patch
patches.drivers/IB-hfi1-Eliminate-opcode-tests-on-mr-deref.patch
patches.arch/powerpc-vdso32-fix-CLOCK_MONOTONIC-on-PPC64.patch
+ patches.fixes/mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch
+ patches.fixes/mm-add-try_get_page-helper-function.patch
+ patches.fixes/mm-prevent-get_user_pages-from-overflowing-page-refcount.patch
+ patches.fixes/fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch
patches.arch/svm-avic-fix-invalidate-logical-apic-id-entry
patches.drivers/ibmvnic-Enable-GRO.patch
patches.drivers/ibmvnic-Fix-netdev-feature-clobbering-during-a-reset.patch
@@ -25811,6 +25819,9 @@
patches.drivers/ibmvnic-Refresh-device-multicast-list-after-reset.patch
patches.drivers/ibmvnic-Fix-unchecked-return-codes-of-memory-allocat.patch
+ # jejb/scsi for-next
+ patches.drivers/scsi-mpt3sas_ctl-fix-double-fetch-bug-in-ctl_ioctl_main
+
# out-of-tree patches
patches.kabi/0001-move-power_up_on_resume-flag-to-end-of-structure-for.patch
patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch
@@ -27236,6 +27247,8 @@
# bsc#1127082
patches.arch/kvm-x86-Add-AMD-s-EX_CFG-to-the-list-of-ignored-MSRs.patch
+ patches.kabi/fs-prevent-page-refcount-overflow-in-pipe_buf_get-kabi.patch
+
########################################################
# You'd better have a good reason for adding a patch
# below here.