author    Vlastimil Babka <vbabka@suse.cz>    2019-01-14 10:07:57 +0100
committer Vlastimil Babka <vbabka@suse.cz>    2019-01-14 10:08:50 +0100
commit    af44e279d391f104c94b20068a651a5ef9de550e (patch)
tree      ff7614ff7fdeffff6cc093aca5ae7185d4d44724
parent    72c8f7d578b852107ebb4e959c8e9605dd2de893 (diff)
mm/khugepaged: minor reorderings in collapse_shmem() (VM Functionality, bsc#1121599).
-rw-r--r--  patches.fixes/mm-khugepaged-minor-reorderings-in-collapse_shmem.patch  232
-rw-r--r--  series.conf                                                              1
2 files changed, 233 insertions(+), 0 deletions(-)
diff --git a/patches.fixes/mm-khugepaged-minor-reorderings-in-collapse_shmem.patch b/patches.fixes/mm-khugepaged-minor-reorderings-in-collapse_shmem.patch
new file mode 100644
index 0000000000..f4c0d7ca23
--- /dev/null
+++ b/patches.fixes/mm-khugepaged-minor-reorderings-in-collapse_shmem.patch
@@ -0,0 +1,232 @@
+From: Hugh Dickins <hughd@google.com>
+Date: Fri, 30 Nov 2018 14:10:39 -0800
+Subject: mm/khugepaged: minor reorderings in collapse_shmem()
+Git-commit: 042a30824871fa3149b0127009074b75cc25863c
+Patch-mainline: v4.20-rc5
+References: VM Functionality, bsc#1121599
+
+[ vbabka@suse.cz: use stable 4.14 pre-xarray backport ]
+
+Several cleanups in collapse_shmem(): most of which probably do not
+really matter, beyond doing things in a more familiar and reassuring
+order. Simplify the failure gotos in the main loop, and on success
+update stats while interrupts still disabled from the last iteration.
+
+Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261526400.2275@eggly.anvils
+Fixes: f3f0e1d2150b2 ("khugepaged: add support of collapse for tmpfs/shmem pages")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Jerome Glisse <jglisse@redhat.com>
+Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org> [4.8+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+---
+ mm/khugepaged.c | 73 +++++++++++++++++++++++---------------------------------
+ 1 file changed, 30 insertions(+), 43 deletions(-)
+
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1328,13 +1328,12 @@ static void collapse_shmem(struct mm_str
+ goto out;
+ }
+
++ __SetPageLocked(new_page);
++ __SetPageSwapBacked(new_page);
+ new_page->index = start;
+ new_page->mapping = mapping;
+- __SetPageSwapBacked(new_page);
+- __SetPageLocked(new_page);
+ BUG_ON(!page_ref_freeze(new_page, 1));
+
+-
+ /*
+ * At this point the new_page is 'frozen' (page_count() is zero), locked
+ * and not up-to-date. It's safe to insert it into radix tree, because
+@@ -1363,13 +1362,13 @@ static void collapse_shmem(struct mm_str
+ */
+ if (n && !shmem_charge(mapping->host, n)) {
+ result = SCAN_FAIL;
+- break;
++ goto tree_locked;
+ }
+- nr_none += n;
+ for (; index < min(iter.index, end); index++) {
+ radix_tree_insert(&mapping->page_tree, index,
+ new_page + (index % HPAGE_PMD_NR));
+ }
++ nr_none += n;
+
+ /* We are done. */
+ if (index >= end)
+@@ -1385,12 +1384,12 @@ static void collapse_shmem(struct mm_str
+ result = SCAN_FAIL;
+ goto tree_unlocked;
+ }
+- spin_lock_irq(&mapping->tree_lock);
+ } else if (trylock_page(page)) {
+ get_page(page);
++ spin_unlock_irq(&mapping->tree_lock);
+ } else {
+ result = SCAN_PAGE_LOCK;
+- break;
++ goto tree_locked;
+ }
+
+ /*
+@@ -1405,11 +1404,10 @@ static void collapse_shmem(struct mm_str
+ result = SCAN_TRUNCATED;
+ goto out_unlock;
+ }
+- spin_unlock_irq(&mapping->tree_lock);
+
+ if (isolate_lru_page(page)) {
+ result = SCAN_DEL_PAGE_LRU;
+- goto out_isolate_failed;
++ goto out_unlock;
+ }
+
+ if (page_mapped(page))
+@@ -1431,7 +1429,9 @@ static void collapse_shmem(struct mm_str
+ */
+ if (!page_ref_freeze(page, 3)) {
+ result = SCAN_PAGE_COUNT;
+- goto out_lru;
++ spin_unlock_irq(&mapping->tree_lock);
++ putback_lru_page(page);
++ goto out_unlock;
+ }
+
+ /*
+@@ -1447,17 +1447,10 @@ static void collapse_shmem(struct mm_str
+ slot = radix_tree_iter_resume(slot, &iter);
+ index++;
+ continue;
+-out_lru:
+- spin_unlock_irq(&mapping->tree_lock);
+- putback_lru_page(page);
+-out_isolate_failed:
+- unlock_page(page);
+- put_page(page);
+- goto tree_unlocked;
+ out_unlock:
+ unlock_page(page);
+ put_page(page);
+- break;
++ goto tree_unlocked;
+ }
+
+ /*
+@@ -1465,7 +1458,7 @@ out_unlock:
+ * This code only triggers if there's nothing in radix tree
+ * beyond 'end'.
+ */
+- if (result == SCAN_SUCCEED && index < end) {
++ if (index < end) {
+ int n = end - index;
+
+ /* Stop if extent has been truncated, and is now empty */
+@@ -1477,7 +1470,6 @@ out_unlock:
+ result = SCAN_FAIL;
+ goto tree_locked;
+ }
+-
+ for (; index < end; index++) {
+ radix_tree_insert(&mapping->page_tree, index,
+ new_page + (index % HPAGE_PMD_NR));
+@@ -1485,14 +1477,19 @@ out_unlock:
+ nr_none += n;
+ }
+
++ __inc_node_page_state(new_page, NR_SHMEM_THPS);
++ if (nr_none) {
++ struct zone *zone = page_zone(new_page);
++
++ __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
++ __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
++ }
++
+ tree_locked:
+ spin_unlock_irq(&mapping->tree_lock);
+ tree_unlocked:
+
+ if (result == SCAN_SUCCEED) {
+- unsigned long flags;
+- struct zone *zone = page_zone(new_page);
+-
+ /*
+ * Replacing old pages with new one has succeed, now we need to
+ * copy the content and free old pages.
+@@ -1506,11 +1503,11 @@ tree_unlocked:
+ copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
+ page);
+ list_del(&page->lru);
+- unlock_page(page);
+- page_ref_unfreeze(page, 1);
+ page->mapping = NULL;
++ page_ref_unfreeze(page, 1);
+ ClearPageActive(page);
+ ClearPageUnevictable(page);
++ unlock_page(page);
+ put_page(page);
+ index++;
+ }
+@@ -1519,28 +1516,17 @@ tree_unlocked:
+ index++;
+ }
+
+- local_irq_save(flags);
+- __inc_node_page_state(new_page, NR_SHMEM_THPS);
+- if (nr_none) {
+- __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+- __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+- }
+- local_irq_restore(flags);
+-
+- /*
+- * Remove pte page tables, so we can re-fault
+- * the page as huge.
+- */
+- retract_page_tables(mapping, start);
+-
+ /* Everything is ready, let's unfreeze the new_page */
+- set_page_dirty(new_page);
+ SetPageUptodate(new_page);
+ page_ref_unfreeze(new_page, HPAGE_PMD_NR);
++ set_page_dirty(new_page);
+ mem_cgroup_commit_charge(new_page, memcg, false, true);
+ lru_cache_add_anon(new_page);
+- unlock_page(new_page);
+
++ /*
++ * Remove pte page tables, so we can re-fault the page as huge.
++ */
++ retract_page_tables(mapping, start);
+ *hpage = NULL;
+ } else {
+ /* Something went wrong: rollback changes to the radix-tree */
+@@ -1573,8 +1559,8 @@ tree_unlocked:
+ slot, page);
+ slot = radix_tree_iter_resume(slot, &iter);
+ spin_unlock_irq(&mapping->tree_lock);
+- putback_lru_page(page);
+ unlock_page(page);
++ putback_lru_page(page);
+ spin_lock_irq(&mapping->tree_lock);
+ }
+ VM_BUG_ON(nr_none);
+@@ -1583,9 +1569,10 @@ tree_unlocked:
+ /* Unfreeze new_page, caller would take care about freeing it */
+ page_ref_unfreeze(new_page, 1);
+ mem_cgroup_cancel_charge(new_page, memcg, true);
+- unlock_page(new_page);
+ new_page->mapping = NULL;
+ }
++
++ unlock_page(new_page);
+ out:
+ VM_BUG_ON(!list_empty(&pagelist));
+ /* TODO: tracepoints */
diff --git a/series.conf b/series.conf
index 358921c1f3..2f576304bf 100644
--- a/series.conf
+++ b/series.conf
@@ -19369,6 +19369,7 @@
patches.fixes/mm-khugepaged-collapse_shmem-stop-if-punched-or-truncated.patch
patches.fixes/mm-khugepaged-fix-crashes-due-to-misaccounted-holes.patch
patches.fixes/mm-khugepaged-collapse_shmem-remember-to-clear-holes.patch
+ patches.fixes/mm-khugepaged-minor-reorderings-in-collapse_shmem.patch
patches.drivers/pci-imx6-fix-link-training-status-detection-in-link-up-check
patches.fixes/fs-fix-lost-error-code-in-dio_complete.patch
patches.fixes/nvme-free-ctrl-device-name-on-init-failure.patch