Home Home > GIT Browse > SLE12-SP4
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Vlastimil Babka <vbabka@suse.cz> 2019-01-14 10:07:57 +0100
committer: Vlastimil Babka <vbabka@suse.cz> 2019-01-14 10:08:21 +0100
commit: 418db51f8b57ec33f90bc040adcae419f70667f8 (patch)
tree: f56794a02c5c3449da40597de62646dd57938989
parent: 4c4ed8764a8ba015076ab56947727776dd4366ed (diff)
mm/huge_memory: fix lockdep complaint on 32-bit i_size_read()
(VM Functionality, bsc#1121599).
-rw-r--r--  patches.fixes/mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch  99
-rw-r--r--  series.conf  1
2 files changed, 100 insertions, 0 deletions
diff --git a/patches.fixes/mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch b/patches.fixes/mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch
new file mode 100644
index 0000000000..b6dfab4f10
--- /dev/null
+++ b/patches.fixes/mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch
@@ -0,0 +1,99 @@
+From: Hugh Dickins <hughd@google.com>
+Date: Fri, 30 Nov 2018 14:10:21 -0800
+Subject: mm/huge_memory: fix lockdep complaint on 32-bit i_size_read()
+Git-commit: 006d3ff27e884f80bd7d306b041afc415f63598f
+Patch-mainline: v4.20-rc5
+References: VM Functionality, bsc#1121599
+
+Huge tmpfs testing, on 32-bit kernel with lockdep enabled, showed that
+__split_huge_page() was using i_size_read() while holding the irq-safe
+lru_lock and page tree lock, but the 32-bit i_size_read() uses an
+irq-unsafe seqlock which should not be nested inside them.
+
+Instead, read the i_size earlier in split_huge_page_to_list(), and pass
+the end offset down to __split_huge_page(): all while holding head page
+lock, which is enough to prevent truncation of that extent before the
+page tree lock has been taken.
+
+Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261520070.2275@eggly.anvils
+Fixes: baa355fd33142 ("thp: file pages support for split_huge_page()")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Jerome Glisse <jglisse@redhat.com>
+Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org> [4.8+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ mm/huge_memory.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2260,12 +2260,11 @@ static void __split_huge_page_tail(struc
+ }
+
+ static void __split_huge_page(struct page *page, struct list_head *list,
+- unsigned long flags)
++ pgoff_t end, unsigned long flags)
+ {
+ struct page *head = compound_head(page);
+ struct zone *zone = page_zone(head);
+ struct lruvec *lruvec;
+- pgoff_t end = -1;
+ int i;
+
+ lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
+@@ -2273,9 +2272,6 @@ static void __split_huge_page(struct pag
+ /* complete memcg works before add pages to LRU */
+ mem_cgroup_split_huge_fixup(head);
+
+- if (!PageAnon(page))
+- end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
+-
+ for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
+ __split_huge_page_tail(head, i, lruvec, list);
+ /* Some pages can be beyond i_size: drop them from page cache */
+@@ -2428,6 +2424,7 @@ int split_huge_page_to_list(struct page
+ int count, mapcount, extra_pins, ret;
+ bool mlocked;
+ unsigned long flags;
++ pgoff_t end;
+
+ VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+@@ -2448,6 +2445,7 @@ int split_huge_page_to_list(struct page
+ goto out;
+ }
+ extra_pins = 0;
++ end = -1;
+ mapping = NULL;
+ anon_vma_lock_write(anon_vma);
+ } else {
+@@ -2463,6 +2461,15 @@ int split_huge_page_to_list(struct page
+ extra_pins = HPAGE_PMD_NR;
+ anon_vma = NULL;
+ i_mmap_lock_read(mapping);
++
++ /*
++ *__split_huge_page() may need to trim off pages beyond EOF:
++ * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
++ * which cannot be nested inside the page tree lock. So note
++ * end now: i_size itself may be changed at any moment, but
++ * head page lock is good enough to serialize the trimming.
++ */
++ end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
+ }
+
+ /*
+@@ -2512,7 +2519,7 @@ int split_huge_page_to_list(struct page
+ if (mapping)
+ __dec_node_page_state(page, NR_SHMEM_THPS);
+ spin_unlock(&pgdata->split_queue_lock);
+- __split_huge_page(page, list, flags);
++ __split_huge_page(page, list, end, flags);
+ ret = 0;
+ } else {
+ if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
diff --git a/series.conf b/series.conf
index 5d98eb82dd..6c64b90dca 100644
--- a/series.conf
+++ b/series.conf
@@ -19365,6 +19365,7 @@
patches.fixes/userfaultfd-shmem-uffdio_copy-set-the-page-dirty-if-vm_write-is-not-set.patch
patches.fixes/mm-huge_memory-rename-freeze_page-to-unmap_page.patch
patches.fixes/mm-huge_memory-splitting-set-mapping-index-before-unfreeze.patch
+ patches.fixes/mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch
patches.drivers/pci-imx6-fix-link-training-status-detection-in-link-up-check
patches.fixes/fs-fix-lost-error-code-in-dio_complete.patch
patches.fixes/nvme-free-ctrl-device-name-on-init-failure.patch