Home Home > GIT Browse > SLE12-SP3-AZURE
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichal Hocko <mhocko@suse.com>2019-02-19 11:02:19 +0100
committerMichal Hocko <mhocko@suse.com>2019-02-19 11:02:19 +0100
commitc9b5c90fc8aa7bcc6bb438094e417b2a7e99185a (patch)
tree445fd3e1dc7aa74034ecad5f029edf0321678086
parenteec003c669ed8912b6acebdbebe1a08c5417191c (diff)
mm, page_alloc: drop should_suppress_show_mem (bnc#1125892, bnc#1106061).
-rw-r--r--patches.fixes/0001-mm-page_alloc-drop-should_suppress_show_mem.patch72
-rw-r--r--series.conf1
2 files changed, 73 insertions, 0 deletions
diff --git a/patches.fixes/0001-mm-page_alloc-drop-should_suppress_show_mem.patch b/patches.fixes/0001-mm-page_alloc-drop-should_suppress_show_mem.patch
new file mode 100644
index 0000000000..024dfe2efb
--- /dev/null
+++ b/patches.fixes/0001-mm-page_alloc-drop-should_suppress_show_mem.patch
@@ -0,0 +1,72 @@
+From 2c029a1ea3aac296cd5b47584a579defcc4b4aa0 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.com>
+Date: Fri, 26 Oct 2018 15:06:49 -0700
+Subject: [PATCH] mm, page_alloc: drop should_suppress_show_mem
+Git-commit: 2c029a1ea3aac296cd5b47584a579defcc4b4aa0
+Patch-mainline: v4.20-rc1
+References: bnc#1125892, bnc#1106061
+
+should_suppress_show_mem() was introduced to reduce the overhead of
+show_mem on large NUMA systems. Things have changed since then though.
+Namely c78e93630d15 ("mm: do not walk all of system memory during
+show_mem") has reduced the overhead considerably.
+
+Moreover warn_alloc_show_mem clears SHOW_MEM_FILTER_NODES when called from
+the IRQ context already so we are not printing per node stats.
+
+Remove should_suppress_show_mem because we are losing potentially
+interesting information about allocation failures. We have seen a bug
+report where system gets unresponsive under memory pressure and there is
+only
+
+Kernel: [2032243.696888] qlge 0000:8b:00.1 ql1: Could not get a page chunk, i=8, clean_idx =200 .
+Kernel: [2032243.710725] swapper/7: page allocation failure: order:1, mode:0x1084120(GFP_ATOMIC|__GFP_COLD|__GFP_COMP)
+
+without an additional information for debugging. It would be great to see
+the state of the page allocator at the moment.
+
+Link: http://lkml.kernel.org/r/20180907114334.7088-1-mhocko@kernel.org
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+---
+ mm/page_alloc.c | 17 +----------------
+ 1 file changed, 1 insertion(+), 16 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3028,20 +3028,6 @@ reset_fair:
+ return NULL;
+ }
+
+-/*
+- * Large machines with many possible nodes should not always dump per-node
+- * meminfo in irq context.
+- */
+-static inline bool should_suppress_show_mem(void)
+-{
+- bool ret = false;
+-
+-#if NODES_SHIFT > 8
+- ret = in_interrupt();
+-#endif
+- return ret;
+-}
+-
+ static DEFINE_RATELIMIT_STATE(nopage_rs,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+@@ -3083,8 +3069,7 @@ void warn_alloc_failed(gfp_t gfp_mask, u
+ pr_warn("%s: page allocation failure: order:%u, mode:%#x(%pGg)\n",
+ current->comm, order, gfp_mask, &gfp_mask);
+ dump_stack();
+- if (!should_suppress_show_mem())
+- show_mem(filter);
++ show_mem(filter);
+ }
+
+ static inline struct page *
diff --git a/series.conf b/series.conf
index 736f75dfb4..d3102e7115 100644
--- a/series.conf
+++ b/series.conf
@@ -24169,6 +24169,7 @@
patches.arch/powerpc-rtas-Fix-a-potential-race-between-CPU-Offlin.patch
patches.arch/powerpc-traps-restore-recoverability-of-machine_chec.patch
patches.fixes/0001-mm-don-t-miss-the-last-page-because-of-round-off-err.patch
+ patches.fixes/0001-mm-page_alloc-drop-should_suppress_show_mem.patch
patches.fixes/0001-drm-cirrus-Use-drm_framebuffer_put-to-avoid-kernel-o.patch
patches.fixes/0001-drm-virtio-fix-bounds-check-in-virtio_gpu_cmd_get_ca.patch
patches.fixes/0001-drm-hisilicon-hibmc-Do-not-carry-error-code-in-HiBMC.patch