author    Jiri Kosina <jkosina@suse.cz>  2017-11-19 22:03:37 +0100
committer Jiri Kosina <jkosina@suse.cz>  2017-11-19 22:03:37 +0100
commit    c05238624339c58035cccc9d74d9abe6298db60f (patch)
tree      aa85615c748e0d27c959017e7f110488a0bf8b28
parent    e1d90be3e55651ae1dd844ef1b69ff09aaaa2f17 (diff)
parent    7533cac0b42a31481c90f7b1396795717ac23436 (diff)
Merge remote-tracking branch 'origin/users/mgorman/SLE15/for-next' into SLE15
-rw-r--r--  patches.suse/irq-softirqs-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   71
-rw-r--r--  patches.suse/irq_work-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   41
-rw-r--r--  patches.suse/locking-lockdep-Add-IRQs-disabled-enabled-assertion-APIs-lockdep_assert_irqs_enabled-disabled.patch   63
-rw-r--r--  patches.suse/mm-only-drain-per-cpu-pagevecs-once-per-pagevec-usage.patch   3
-rw-r--r--  patches.suse/mm-page_alloc-enable-disable-IRQs-once-when-freeing-a-list-of-pages.patch   3
-rw-r--r--  patches.suse/mm-page_alloc-simplify-list-handling-in-rmqueue_bulk.patch   4
-rw-r--r--  patches.suse/mm-pagevec-remove-cold-parameter-for-pagevecs.patch   5
-rw-r--r--  patches.suse/mm-pagevec-rename-pagevec-drained-field.patch   4
-rw-r--r--  patches.suse/mm-remove-__GFP_COLD.patch   3
-rw-r--r--  patches.suse/mm-remove-cold-parameter-for-release_pages.patch   3
-rw-r--r--  patches.suse/mm-remove-cold-parameter-from-free_hot_cold_page.patch   3
-rw-r--r--  patches.suse/mm-truncate-do-not-check-mapping-for-every-page-being-truncated.patch   3
-rw-r--r--  patches.suse/mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock-fix.patch   145
-rw-r--r--  patches.suse/mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock.patch   115
-rw-r--r--  patches.suse/netpoll-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   41
-rw-r--r--  patches.suse/pcc-cpufreq-Re-introduce-deadband-effect-to-reduce-number-of-frequency-changes.patch   89
-rw-r--r--  patches.suse/perf-core-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   86
-rw-r--r--  patches.suse/rcu-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   53
-rw-r--r--  patches.suse/sched-clock-sched-cputime-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   57
-rw-r--r--  patches.suse/sched-idle-Micro-optimize-the-idle-loop.patch   84
-rw-r--r--  patches.suse/smp-core-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   41
-rw-r--r--  patches.suse/timers-hrtimer-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   44
-rw-r--r--  patches.suse/timers-nohz-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   51
-rw-r--r--  patches.suse/timers-posix-cpu-timers-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   60
-rw-r--r--  patches.suse/workqueue-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   41
-rw-r--r--  patches.suse/x86-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch   65
-rw-r--r--  series.conf   24
27 files changed, 985 insertions(+), 217 deletions(-)
diff --git a/patches.suse/irq-softirqs-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/irq-softirqs-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..aed5133bd6
--- /dev/null
+++ b/patches.suse/irq-softirqs-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,71 @@
+From 19d333ba710b47df6819b05594a3ce378e1b6604 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:18 +0100
+Subject: [PATCH] irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: f71b74bca637fca7de78f1d44eaefde5bc900f9f
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-3-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/softirq.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 4e09821f9d9e..662f7b1b7a78 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__local_bh_disable_ip);
+
+ static void __local_bh_enable(unsigned int cnt)
+ {
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ if (softirq_count() == (cnt & SOFTIRQ_MASK))
+ trace_softirqs_on(_RET_IP_);
+@@ -158,7 +158,8 @@ EXPORT_SYMBOL(_local_bh_enable);
+
+ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+ {
+- WARN_ON_ONCE(in_irq() || irqs_disabled());
++ WARN_ON_ONCE(in_irq());
++ lockdep_assert_irqs_enabled();
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ local_irq_disable();
+ #endif
+@@ -396,9 +397,8 @@ void irq_exit(void)
+ #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
+ local_irq_disable();
+ #else
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+ #endif
+-
+ account_irq_exit_time(current);
+ preempt_count_sub(HARDIRQ_OFFSET);
+ if (!in_interrupt() && local_softirq_pending())
+@@ -488,7 +488,7 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+ {
+- BUG_ON(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ t->next = __this_cpu_read(tasklet_hi_vec.head);
+ __this_cpu_write(tasklet_hi_vec.head, t);
diff --git a/patches.suse/irq_work-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/irq_work-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..7d495dda55
--- /dev/null
+++ b/patches.suse/irq_work-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,41 @@
+From 2227eb39542616e0080ba888d6752111fba4cf8d Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:26 +0100
+Subject: [PATCH] irq_work: Use lockdep to assert IRQs are disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: 3c7169a3bf8216a56761a8edf775072dd36a00a0
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-11-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/irq_work.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index bcf107ce0854..899579657a0a 100644
+--- a/kernel/irq_work.c
++++ b/kernel/irq_work.c
+@@ -188,7 +188,7 @@ void irq_work_tick(void)
+ */
+ void irq_work_sync(struct irq_work *work)
+ {
+- WARN_ON_ONCE(irqs_disabled());
++ lockdep_assert_irqs_enabled();
+
+ while (work->flags & IRQ_WORK_BUSY)
+ cpu_relax();
diff --git a/patches.suse/locking-lockdep-Add-IRQs-disabled-enabled-assertion-APIs-lockdep_assert_irqs_enabled-disabled.patch b/patches.suse/locking-lockdep-Add-IRQs-disabled-enabled-assertion-APIs-lockdep_assert_irqs_enabled-disabled.patch
new file mode 100644
index 0000000000..47539a0654
--- /dev/null
+++ b/patches.suse/locking-lockdep-Add-IRQs-disabled-enabled-assertion-APIs-lockdep_assert_irqs_enabled-disabled.patch
@@ -0,0 +1,63 @@
+From f4d3fddb570141aa2edb4e9d06949237518e45ab Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:17 +0100
+Subject: [PATCH] locking/lockdep: Add IRQs disabled/enabled assertion APIs:
+ lockdep_assert_irqs_enabled()/disabled()
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: f54bb2ec02c839f6bfe3e8d438cd93d30b4809dd
+
+Checking whether IRQs are enabled or disabled is a very common sanity
+check, however not free of overhead especially on fastpath where such
+assertion is very common.
+
+Lockdep is a good host for such concurrency correctness check and it
+even already tracks down IRQs disablement state. Just reuse its
+machinery. This will allow us to get rid of the flags pop and check
+overhead from fast path when kernel is built for production.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-2-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ include/linux/lockdep.h | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
+index fffe49f188e6..0125a04ed343 100644
+--- a/include/linux/lockdep.h
++++ b/include/linux/lockdep.h
+@@ -586,9 +586,24 @@ do { \
+ lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+ } while (0)
++
++#define lockdep_assert_irqs_enabled() do { \
++ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
++ !current->hardirqs_enabled, \
++ "IRQs not enabled as expected\n"); \
++ } while (0)
++
++#define lockdep_assert_irqs_disabled() do { \
++ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
++ current->hardirqs_enabled, \
++ "IRQs not disabled as expected\n"); \
++ } while (0)
++
+ #else
+ # define might_lock(lock) do { } while (0)
+ # define might_lock_read(lock) do { } while (0)
++# define lockdep_assert_irqs_enabled() do { } while (0)
++# define lockdep_assert_irqs_disabled() do { } while (0)
+ #endif
+
+ #ifdef CONFIG_LOCKDEP
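The two macros added above compile to empty statements unless lockdep is built in, which is what makes the WARN_ON_ONCE(!irqs_disabled()) conversions in the surrounding patches free in production kernels. A minimal user-space sketch of the same debug-gated assertion pattern; all names here (DEBUG_CHECKS, irqs_enabled, my_assert_irqs_disabled) are purely illustrative, not kernel API:

#include <stdbool.h>
#include <stdio.h>

#ifdef DEBUG_CHECKS
static bool irqs_enabled = true;   /* stand-in for lockdep's per-task state */

/* Debug build: warn when the invariant is violated. */
#define my_assert_irqs_disabled() do {                                \
        if (irqs_enabled)                                             \
                fprintf(stderr, "%s:%d: IRQs not disabled\n",         \
                        __FILE__, __LINE__);                          \
} while (0)
#else
/* Production build: the check vanishes, costing nothing on fast paths. */
#define my_assert_irqs_disabled() do { } while (0)
#endif

int main(void)
{
        my_assert_irqs_disabled();   /* warns only when built with -DDEBUG_CHECKS */
        return 0;
}

Compiled without -DDEBUG_CHECKS the call site disappears entirely, which is the property every "Use lockdep to assert IRQs are disabled/enabled" patch in this series relies on.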
diff --git a/patches.suse/mm-only-drain-per-cpu-pagevecs-once-per-pagevec-usage.patch b/patches.suse/mm-only-drain-per-cpu-pagevecs-once-per-pagevec-usage.patch
index 00a808861f..6ecfda47dc 100644
--- a/patches.suse/mm-only-drain-per-cpu-pagevecs-once-per-pagevec-usage.patch
+++ b/patches.suse/mm-only-drain-per-cpu-pagevecs-once-per-pagevec-usage.patch
@@ -4,7 +4,8 @@ Date: Thu, 19 Oct 2017 13:50:13 +0100
Subject: [PATCH] mm: only drain per-cpu pagevecs once per pagevec usage
References: bsc#1060256
-Patch-mainline: not yet, accepted to mm tree
+Patch-mainline: v4.15
+Git-commit: d9ed0d08b6c6a882da1d8e75bb3162fc889fd199
When a pagevec is initialised on the stack, it is generally used multiple
times over a range of pages, looking up entries and then releasing them.
diff --git a/patches.suse/mm-page_alloc-enable-disable-IRQs-once-when-freeing-a-list-of-pages.patch b/patches.suse/mm-page_alloc-enable-disable-IRQs-once-when-freeing-a-list-of-pages.patch
index 29f923732f..a73d69c9cc 100644
--- a/patches.suse/mm-page_alloc-enable-disable-IRQs-once-when-freeing-a-list-of-pages.patch
+++ b/patches.suse/mm-page_alloc-enable-disable-IRQs-once-when-freeing-a-list-of-pages.patch
@@ -5,7 +5,8 @@ Subject: [PATCH] mm, page_alloc: enable/disable IRQs once when freeing a list
of pages
References: bsc#1060256
-Patch-mainline: not yet, accepted to mm tree
+Patch-mainline: v4.15
+Git-commit: 9cca35d42eb61b69e108a17215756c46173a5e6f
Patch series "Follow-up for speed up page cache truncation", v2.
diff --git a/patches.suse/mm-page_alloc-simplify-list-handling-in-rmqueue_bulk.patch b/patches.suse/mm-page_alloc-simplify-list-handling-in-rmqueue_bulk.patch
index 88eac67f2a..299aec8f03 100644
--- a/patches.suse/mm-page_alloc-simplify-list-handling-in-rmqueue_bulk.patch
+++ b/patches.suse/mm-page_alloc-simplify-list-handling-in-rmqueue_bulk.patch
@@ -2,8 +2,8 @@ From: Vlastimil Babka <vbabka@suse.cz>
Subject: mm, page_alloc: simplify list handling in rmqueue_bulk()
References: bsc#1060256
-Patch-mainline: not yet, accepted to mm tree
-Patch-name: patches.suse/mm-page_alloc-simplify-list-handling-in-rmqueue_bulk.patch
+Patch-mainline: v4.15
+Git-commit: 0fac3ba527f23219678c7c10c767e37d40127b51
rmqueue_bulk() fills an empty pcplist with pages from the free list. It
tries to preserve increasing order by pfn to the caller, because it leads
diff --git a/patches.suse/mm-pagevec-remove-cold-parameter-for-pagevecs.patch b/patches.suse/mm-pagevec-remove-cold-parameter-for-pagevecs.patch
index 7f669e8a88..f816ab211c 100644
--- a/patches.suse/mm-pagevec-remove-cold-parameter-for-pagevecs.patch
+++ b/patches.suse/mm-pagevec-remove-cold-parameter-for-pagevecs.patch
@@ -4,7 +4,8 @@ Date: Thu, 19 Oct 2017 13:52:23 +0100
Subject: [PATCH] mm, pagevec: remove cold parameter for pagevecs
References: bsc#1060256
-Patch-mainline: not yet, accepted to mm tree
+Patch-mainline: v4.15
+Git-commit: 8667982014d6048e0b5e286b6247ff24f48d4cc6
Every pagevec_init user claims the pages being released are hot even in
cases where it is unlikely the pages are hot. As no one cares about the
@@ -649,8 +650,8 @@ index ba6ca3291685..7197a5bb290a 100644
while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
@@ -335,7 +335,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ */
struct pagevec locked_pvec;
- int ei = PAGEVEC_SIZE;
- pagevec_init(&locked_pvec, 0);
+ pagevec_init(&locked_pvec);
diff --git a/patches.suse/mm-pagevec-rename-pagevec-drained-field.patch b/patches.suse/mm-pagevec-rename-pagevec-drained-field.patch
index 667b0c58c4..70a1c239c4 100644
--- a/patches.suse/mm-pagevec-rename-pagevec-drained-field.patch
+++ b/patches.suse/mm-pagevec-rename-pagevec-drained-field.patch
@@ -2,8 +2,8 @@ From: Mel Gorman <mgorman@techsingularity.net>
Subject: mm, pagevec: rename pagevec drained field
References: bsc#1060256
-Patch-mainline: not yet, accepted to mm tree
-Patch-name: patches.suse/mm-pagevec-rename-pagevec-drained-field.patch
+Patch-mainline: v4.15
+Git-commit: 7f0b5fb953e750a7410cc96c67a656d79db48bcb
According to Vlastimil Babka, the drained field in pagevec is potentially
misleading because it might be interpreted as draining this pagevec
diff --git a/patches.suse/mm-remove-__GFP_COLD.patch b/patches.suse/mm-remove-__GFP_COLD.patch
index f1925661b4..44546f1683 100644
--- a/patches.suse/mm-remove-__GFP_COLD.patch
+++ b/patches.suse/mm-remove-__GFP_COLD.patch
@@ -4,7 +4,8 @@ Date: Thu, 19 Oct 2017 13:56:53 +0100
Subject: [PATCH] mm: remove __GFP_COLD
References: bsc#1060256
-Patch-mainline: not yet, accepted to mm tree
+Patch-mainline: v4.15
+Git-commit: 453f85d43fa9ee243f0fc3ac4e1be45615301e3f
As the page free path makes no distinction between cache hot and cold
pages, there is no real useful ordering of pages in the free list that
diff --git a/patches.suse/mm-remove-cold-parameter-for-release_pages.patch b/patches.suse/mm-remove-cold-parameter-for-release_pages.patch
index 927a4e6764..5c99038bd4 100644
--- a/patches.suse/mm-remove-cold-parameter-for-release_pages.patch
+++ b/patches.suse/mm-remove-cold-parameter-for-release_pages.patch
@@ -4,7 +4,8 @@ Date: Thu, 19 Oct 2017 13:55:13 +0100
Subject: [PATCH] mm: remove cold parameter for release_pages
References: bsc#1060256
-Patch-mainline: not yet, accepted to mm tree
+Patch-mainline: v4.15
+Git-commit: c6f92f9fbe7dbcc8903a67229aa88b4077ae4422
All callers of release_pages claim the pages being released are cache hot.
As no one cares about the hotness of pages being released to the
diff --git a/patches.suse/mm-remove-cold-parameter-from-free_hot_cold_page.patch b/patches.suse/mm-remove-cold-parameter-from-free_hot_cold_page.patch
index 12527552b0..7654c36398 100644
--- a/patches.suse/mm-remove-cold-parameter-from-free_hot_cold_page.patch
+++ b/patches.suse/mm-remove-cold-parameter-from-free_hot_cold_page.patch
@@ -4,7 +4,8 @@ Date: Thu, 19 Oct 2017 13:55:21 +0100
Subject: [PATCH] mm: remove cold parameter from free_hot_cold_page*
References: bsc#1060256
-Patch-mainline: not yet, accepted to mm tree
+Patch-mainline: v4.15
+Git-commit: 2d4894b5d2ae0fe1725ea7abd57b33bfbbe45492
Most callers users of free_hot_cold_page claim the pages being released
are cache hot. The exception is the page reclaim paths where it is likely
diff --git a/patches.suse/mm-truncate-do-not-check-mapping-for-every-page-being-truncated.patch b/patches.suse/mm-truncate-do-not-check-mapping-for-every-page-being-truncated.patch
index 45aa108bfb..11b92eb894 100644
--- a/patches.suse/mm-truncate-do-not-check-mapping-for-every-page-being-truncated.patch
+++ b/patches.suse/mm-truncate-do-not-check-mapping-for-every-page-being-truncated.patch
@@ -5,7 +5,8 @@ Subject: [PATCH] mm, truncate: do not check mapping for every page being
truncated
References: bsc#1060256
-Patch-mainline: not yet, accepted to mm tree
+Patch-mainline: v4.15
+Git-commit: c7df8ad2910e965a6241b6d8f52fd122e26b0315
During truncation, the mapping has already been checked for shmem and dax
so it's known that workingset_update_node is required. This patch avoids
diff --git a/patches.suse/mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock-fix.patch b/patches.suse/mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock-fix.patch
deleted file mode 100644
index 1b332eb8b5..0000000000
--- a/patches.suse/mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock-fix.patch
+++ /dev/null
@@ -1,145 +0,0 @@
-From: Jan Kara <jack@suse.cz>
-Subject: [PATCH] mm, truncate: remove all exceptional entries from pagevec under one lock -fix
-References: bsc#1065138
-Patch-mainline: not yet, accepted to mm tree
-
-Patch "mm, truncate: remove all exceptional entries from pagevec" had a
-problem that truncate_exceptional_pvec_entries() didn't remove exceptional
-entries that were beyond end of truncated range from the pagevec. As a result
-pagevec_release() oopsed trying to treat exceptional entry as a page pointer.
-This can be reproduced by running xfstests generic/269 in a loop while
-applying memory pressure until the bug triggers.
-
-Rip out fragile passing of index of the first exceptional entry in the
-pagevec and scan the full pagevec instead. Additional pagevec pass doesn't
-have measurable overhead and the code is more robust that way.
-
-This is a fix to the mmotm patch
-mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock.patch
-
-Signed-off-by: Jan Kara <jack@suse.cz>
-
----
- mm/truncate.c | 43 +++++++++++++++++++++----------------------
- 1 file changed, 21 insertions(+), 22 deletions(-)
-
---- a/mm/truncate.c
-+++ b/mm/truncate.c
-@@ -59,24 +59,29 @@ static void clear_shadow_entry(struct ad
- * exceptional entries similar to what pagevec_remove_exceptionals does.
- */
- static void truncate_exceptional_pvec_entries(struct address_space *mapping,
-- struct pagevec *pvec, pgoff_t *indices, int ei)
-+ struct pagevec *pvec, pgoff_t *indices,
-+ pgoff_t end)
- {
- int i, j;
-- bool dax;
--
-- /* Return immediately if caller indicates there are no entries */
-- if (ei == PAGEVEC_SIZE)
-- return;
-+ bool dax, lock;
-
- /* Handled by shmem itself */
- if (shmem_mapping(mapping))
- return;
-
-+ for (j = 0; j < pagevec_count(pvec); j++)
-+ if (radix_tree_exceptional_entry(pvec->pages[j]))
-+ break;
-+
-+ if (j == pagevec_count(pvec))
-+ return;
-+
- dax = dax_mapping(mapping);
-- if (!dax)
-+ lock = !dax && indices[j] < end;
-+ if (lock)
- spin_lock_irq(&mapping->tree_lock);
-
-- for (i = ei, j = ei; i < pagevec_count(pvec); i++) {
-+ for (i = j; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
- pgoff_t index = indices[i];
-
-@@ -85,6 +90,9 @@ static void truncate_exceptional_pvec_en
- continue;
- }
-
-+ if (index >= end)
-+ continue;
-+
- if (unlikely(dax)) {
- dax_delete_mapping_entry(mapping, index);
- continue;
-@@ -93,7 +101,7 @@ static void truncate_exceptional_pvec_en
- __clear_shadow_entry(mapping, index, page);
- }
-
-- if (!dax)
-+ if (lock)
- spin_unlock_irq(&mapping->tree_lock);
- pvec->nr = j;
- }
-@@ -333,7 +341,6 @@ void truncate_inode_pages_range(struct a
- * in a new pagevec.
- */
- struct pagevec locked_pvec;
-- int ei = PAGEVEC_SIZE;
-
- pagevec_init(&locked_pvec);
- for (i = 0; i < pagevec_count(&pvec); i++) {
-@@ -344,11 +351,8 @@ void truncate_inode_pages_range(struct a
- if (index >= end)
- break;
-
-- if (radix_tree_exceptional_entry(page)) {
-- if (ei == PAGEVEC_SIZE)
-- ei = i;
-+ if (radix_tree_exceptional_entry(page))
- continue;
-- }
-
- if (!trylock_page(page))
- continue;
-@@ -368,7 +372,7 @@ void truncate_inode_pages_range(struct a
- delete_from_page_cache_batch(mapping, &locked_pvec);
- for (i = 0; i < pagevec_count(&locked_pvec); i++)
- unlock_page(locked_pvec.pages[i]);
-- truncate_exceptional_pvec_entries(mapping, &pvec, indices, ei);
-+ truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
- pagevec_release(&pvec);
- cond_resched();
- index++;
-@@ -414,8 +418,6 @@ void truncate_inode_pages_range(struct a
-
- index = start;
- for ( ; ; ) {
-- int ei = PAGEVEC_SIZE;
--
- cond_resched();
- if (!pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
-@@ -444,11 +446,8 @@ void truncate_inode_pages_range(struct a
- break;
- }
-
-- if (radix_tree_exceptional_entry(page)) {
-- if (ei == PAGEVEC_SIZE)
-- ei = i;
-+ if (radix_tree_exceptional_entry(page))
- continue;
-- }
-
- lock_page(page);
- WARN_ON(page_to_index(page) != index);
-@@ -456,7 +455,7 @@ void truncate_inode_pages_range(struct a
- truncate_inode_page(mapping, page);
- unlock_page(page);
- }
-- truncate_exceptional_pvec_entries(mapping, &pvec, indices, ei);
-+ truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
- pagevec_release(&pvec);
- index++;
- }
diff --git a/patches.suse/mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock.patch b/patches.suse/mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock.patch
index 08c5fd83dc..222363920d 100644
--- a/patches.suse/mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock.patch
+++ b/patches.suse/mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock.patch
@@ -5,18 +5,19 @@ Subject: [PATCH] mm, truncate: remove all exceptional entries from pagevec
under one lock
References: bsc#1060256
-Patch-mainline: not yet, accepted to mm tree
+Patch-mainline: v4.15
+Git-commit: f2187599189d94aeeee2fa5d9806186c7732ed37
During truncate each entry in a pagevec is checked to see if it is an
exceptional entry and if so, the shadow entry is cleaned up. This is
-potentially expensive as multiple entries for a mapping locks/unlocks the
-tree lock. This batches the operation such that any exceptional entries
-removed from a pagevec only acquire the mapping tree lock once. The
-corner case where this is more expensive is where there is only one
-exceptional entry but this is unlikely due to temporal locality and how it
-affects LRU ordering. Note that for truncations of small files created
-recently, this patch should show no gain because it only batches the
-handling of exceptional entries.
+potentially expensive as multiple entries for a mapping locks/unlocks
+the tree lock. This batches the operation such that any exceptional
+entries removed from a pagevec only acquire the mapping tree lock once.
+The corner case where this is more expensive is where there is only one
+exceptional entry but this is unlikely due to temporal locality and how
+it affects LRU ordering. Note that for truncations of small files
+created recently, this patch should show no gain because it only batches
+the handling of exceptional entries.
sparsetruncate (large)
4.14.0-rc4 4.14.0-rc4
@@ -46,11 +47,15 @@ Hmean RandCreate ops 71.92 ( 0.00%) 76.95 ( 7.00%)
Hmean RandCreate read 44.44 ( 0.00%) 49.23 ( 10.78%)
Hmean RandCreate del 24948.62 ( 0.00%) 24764.97 ( -0.74%)
-Truncation of a large number of files shows a substantial gain with 99% of
-files being truncated 6.46% faster. bonnie shows a modest gain of 2.53%
+Truncation of a large number of files shows a substantial gain with 99%
+of files being truncated 6.46% faster. bonnie shows a modest gain of
+2.53%
+[jack@suse.cz: fix truncate_exceptional_pvec_entries()]
+ Link: http://lkml.kernel.org/r/20171108164226.26788-1-jack@suse.cz
Link: http://lkml.kernel.org/r/20171018075952.10627-4-mgorman@techsingularity.net
-Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andi Kleen <ak@linux.intel.com>
@@ -58,15 +63,13 @@ Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
----
- mm/truncate.c | 86 ++++++++++++++++++++++++++++++++++++++++++-----------------
- 1 file changed, 61 insertions(+), 25 deletions(-)
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/truncate.c b/mm/truncate.c
-index 0f7c90855768..ba6ca3291685 100644
+index 02a0c0466c78..c30e8fa3d063 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
-@@ -25,44 +25,77 @@
+@@ -25,44 +25,85 @@
#include <linux/rmap.h>
#include "internal.h"
@@ -118,14 +121,11 @@ index 0f7c90855768..ba6ca3291685 100644
-static void truncate_exceptional_entry(struct address_space *mapping,
- pgoff_t index, void *entry)
+static void truncate_exceptional_pvec_entries(struct address_space *mapping,
-+ struct pagevec *pvec, pgoff_t *indices, int ei)
++ struct pagevec *pvec, pgoff_t *indices,
++ pgoff_t end)
{
+ int i, j;
-+ bool dax;
-+
-+ /* Return immediately if caller indicates there are no entries */
-+ if (ei == PAGEVEC_SIZE)
-+ return;
++ bool dax, lock;
+
/* Handled by shmem itself */
if (shmem_mapping(mapping))
@@ -133,12 +133,19 @@ index 0f7c90855768..ba6ca3291685 100644
- if (dax_mapping(mapping)) {
- dax_delete_mapping_entry(mapping, index);
-- return;
++ for (j = 0; j < pagevec_count(pvec); j++)
++ if (radix_tree_exceptional_entry(pvec->pages[j]))
++ break;
++
++ if (j == pagevec_count(pvec))
+ return;
++
+ dax = dax_mapping(mapping);
-+ if (!dax)
++ lock = !dax && indices[j] < end;
++ if (lock)
+ spin_lock_irq(&mapping->tree_lock);
+
-+ for (i = ei, j = ei; i < pagevec_count(pvec); i++) {
++ for (i = j; i < pagevec_count(pvec); i++) {
+ struct page *page = pvec->pages[i];
+ pgoff_t index = indices[i];
+
@@ -147,6 +154,9 @@ index 0f7c90855768..ba6ca3291685 100644
+ continue;
+ }
+
++ if (index >= end)
++ continue;
++
+ if (unlikely(dax)) {
+ dax_delete_mapping_entry(mapping, index);
+ continue;
@@ -156,37 +166,31 @@ index 0f7c90855768..ba6ca3291685 100644
}
- clear_shadow_entry(mapping, index, entry);
+
-+ if (!dax)
++ if (lock)
+ spin_unlock_irq(&mapping->tree_lock);
+ pvec->nr = j;
}
/*
-@@ -300,6 +333,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
- * in a new pagevec.
- */
- struct pagevec locked_pvec;
-+ int ei = PAGEVEC_SIZE;
-
- pagevec_init(&locked_pvec, 0);
- for (i = 0; i < pagevec_count(&pvec); i++) {
-@@ -311,8 +345,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
+@@ -310,11 +351,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ if (index >= end)
break;
- if (radix_tree_exceptional_entry(page)) {
+- if (radix_tree_exceptional_entry(page)) {
- truncate_exceptional_entry(mapping, index,
- page);
-+ if (ei == PAGEVEC_SIZE)
-+ ei = i;
++ if (radix_tree_exceptional_entry(page))
continue;
- }
+- }
-@@ -334,12 +368,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ if (!trylock_page(page))
+ continue;
+@@ -334,12 +372,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
delete_from_page_cache_batch(mapping, &locked_pvec);
for (i = 0; i < pagevec_count(&locked_pvec); i++)
unlock_page(locked_pvec.pages[i]);
- pagevec_remove_exceptionals(&pvec);
-+ truncate_exceptional_pvec_entries(mapping, &pvec, indices, ei);
++ truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
pagevec_release(&pvec);
cond_resched();
index++;
@@ -195,16 +199,7 @@ index 0f7c90855768..ba6ca3291685 100644
if (partial_start) {
struct page *page = find_lock_page(mapping, start - 1);
if (page) {
-@@ -381,6 +414,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
-
- index = start;
- for ( ; ; ) {
-+ int ei = PAGEVEC_SIZE;
-+
- cond_resched();
- if (!pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
-@@ -397,6 +432,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
+@@ -397,6 +434,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
pagevec_release(&pvec);
break;
}
@@ -212,23 +207,25 @@ index 0f7c90855768..ba6ca3291685 100644
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
-@@ -409,8 +445,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
+@@ -408,11 +446,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ break;
}
- if (radix_tree_exceptional_entry(page)) {
+- if (radix_tree_exceptional_entry(page)) {
- truncate_exceptional_entry(mapping, index,
- page);
-+ if (ei == PAGEVEC_SIZE)
-+ ei = i;
++ if (radix_tree_exceptional_entry(page))
continue;
- }
+- }
-@@ -420,7 +456,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ lock_page(page);
+ WARN_ON(page_to_index(page) != index);
+@@ -420,7 +455,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
truncate_inode_page(mapping, page);
unlock_page(page);
}
- pagevec_remove_exceptionals(&pvec);
-+ truncate_exceptional_pvec_entries(mapping, &pvec, indices, ei);
++ truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
pagevec_release(&pvec);
index++;
}
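The batching that truncate_exceptional_pvec_entries() implements above — scan the pagevec first, then take mapping->tree_lock once for all exceptional entries rather than once per entry — can be sketched in self-contained user-space form. Everything here (BATCH, tree_lock, is_exceptional) is illustrative, assuming a tag bit marks exceptional entries as radix-tree shadow entries do:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH 15   /* PAGEVEC_SIZE-like batch */

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Exceptional entries carry a tag bit, like radix-tree shadow entries. */
static bool is_exceptional(uintptr_t entry)
{
        return (entry & 1) != 0;
}

static void clear_shadow(uintptr_t entry)
{
        printf("clearing shadow entry %#lx\n", (unsigned long)entry);
}

static void process_batch(uintptr_t entries[], int n)
{
        int i;

        /* Cheap unlocked scan: skip the lock entirely if nothing needs it. */
        for (i = 0; i < n; i++)
                if (is_exceptional(entries[i]))
                        break;
        if (i == n)
                return;

        pthread_mutex_lock(&tree_lock);   /* one acquisition per batch... */
        for (; i < n; i++)
                if (is_exceptional(entries[i]))
                        clear_shadow(entries[i]);
        pthread_mutex_unlock(&tree_lock); /* ...instead of one per entry */
}

int main(void)
{
        uintptr_t batch[BATCH] = { 0x1000, 0x2001, 0x3000, 0x4003 };

        process_batch(batch, 4);
        return 0;
}

The unlocked pre-scan mirrors the kernel code: a pagevec containing no exceptional entries never touches the lock at all.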
diff --git a/patches.suse/netpoll-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/netpoll-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..56f1040ca3
--- /dev/null
+++ b/patches.suse/netpoll-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,41 @@
+From e4f4b5d1135a86f6f808652a6f33088bd42ff413 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:29 +0100
+Subject: [PATCH] netpoll: Use lockdep to assert IRQs are disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: af0733937317e1e03b60f3af8cf9cd59d665593c
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-14-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ net/core/netpoll.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index a43fab2ec475..9ed46effeca1 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -334,7 +334,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+ /* It is up to the caller to keep npinfo alive. */
+ struct netpoll_info *npinfo;
+
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ npinfo = rcu_dereference_bh(np->dev->npinfo);
+ if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
diff --git a/patches.suse/pcc-cpufreq-Re-introduce-deadband-effect-to-reduce-number-of-frequency-changes.patch b/patches.suse/pcc-cpufreq-Re-introduce-deadband-effect-to-reduce-number-of-frequency-changes.patch
new file mode 100644
index 0000000000..2cc0b9f552
--- /dev/null
+++ b/patches.suse/pcc-cpufreq-Re-introduce-deadband-effect-to-reduce-number-of-frequency-changes.patch
@@ -0,0 +1,89 @@
+From 74833f9c6a2d3f3f3727eb078a96d677f3ef7955 Mon Sep 17 00:00:00 2001
+From: Andreas Herrmann <aherrmann@suse.com>
+Date: Wed, 17 Aug 2016 19:50:57 +0100
+Subject: [PATCH] pcc-cpufreq: Re-introduce deadband effect to reduce number of
+ frequency changes
+
+References: bsc#981838
+Patch-mainline: No, waiting on upstream review and may be unacceptable
+
+Commit 6393d6a102 (cpufreq: ondemand: Eliminate the deadband effect)
+introduced a performance regression for systems using pcc-cpufreq and
+ondemand governor. This is measurable with different workloads. E.g.
+wall-clock time for kernel compilation significantly increased.
+
+The elimination of the deadband effect significantly increased the
+number of frequency changes with pcc-cpufreq.
+
+Instead of reverting commit 6393d6a102 I suggest to add a workaround
+in pcc-cpufreq to re-introduce the deadband effect for this driver
+only - to restore the old performance behaviour with pcc-cpufreq with
+ondemand governor.
+
+Following some performance numbers for similar kernel compilations to
+illustrate the effect of commit 6393d6a102 and the proposed fix.
+
+Following typical numbers of kernel compilation tests with varying number of
+compile jobs:
+
+ v4.8.0-rc2 4.8.0-rc2-pcc-cpufreq-deadband
+ # of jobs   user     sys  elapsed   CPU    user     sys  elapsed   CPU
+ 2 440.39 116.49 4:33.35 203% 404.85 109.10 4:10.35 205%
+ 4 436.87 133.39 2:22.88 399% 381.83 128.00 2:06.84 401%
+ 8 475.49 157.68 1:22.24 769% 344.36 149.08 1:04.29 767%
+ 16 620.69 188.33 0:54.74 1477% 374.60 157.40 0:36.76 1447%
+ 32 815.79 209.58 0:37.22 2754% 490.46 160.22 0:24.87 2616%
+ 64 394.13 60.55 0:13.54 3355% 386.54 60.33 0:12.79 3493%
+ 120 398.24 61.55 0:14.60 3148% 390.44 61.19 0:13.07 3453%
+
+(HP ProLiant DL580 Gen8 system, 60 CPUs @ 2.80GHz)
+
+Link: http://marc.info/?l=linux-pm&m=147160912625600
+Signed-off-by: Andreas Herrmann <aherrmann@suse.com>
+
+---
+ drivers/cpufreq/pcc-cpufreq.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
+index 3f0ce2ae35ee..d2e2963f3ede 100644
+--- a/drivers/cpufreq/pcc-cpufreq.c
++++ b/drivers/cpufreq/pcc-cpufreq.c
+@@ -200,10 +200,26 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
+ {
+ struct pcc_cpu *pcc_cpu_data;
+ struct cpufreq_freqs freqs;
++ static u32 limit = 0;
++
+ u16 status;
+ u32 input_buffer;
+ int cpu;
+
++ if (!limit) {
++ u32 f_min = policy->cpuinfo.min_freq / 1000;
++ u32 f_max = policy->cpuinfo.max_freq / 1000;
++ limit = (f_max - f_min) * f_min;
++ limit /= f_max;
++ limit *= 1000;
++ limit += f_min * 1000;
++ pr_debug("pcc-cpufreq: setting deadband limit to %u kHz\n",
++ limit);
++ }
++
++ if (target_freq < limit)
++ target_freq = policy->min;
++
+ cpu = policy->cpu;
+ pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+
+@@ -214,6 +230,10 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
++
++ if (freqs.new == freqs.old)
++ return 0;
++
+ cpufreq_freq_transition_begin(policy, &freqs);
+ spin_lock(&pcc_lock);
+
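Plugging illustrative numbers into the limit computation above makes the deadband concrete. Assuming a hypothetical policy with cpuinfo.min_freq = 1200000 kHz and cpuinfo.max_freq = 2800000 kHz (values chosen for illustration, not taken from the patch):

#include <stdio.h>

int main(void)
{
        unsigned int min_khz = 1200000, max_khz = 2800000; /* assumed policy */
        unsigned int f_min = min_khz / 1000;  /* 1200 MHz */
        unsigned int f_max = max_khz / 1000;  /* 2800 MHz */
        unsigned int limit;

        limit  = (f_max - f_min) * f_min;     /* 1600 * 1200 = 1920000 */
        limit /= f_max;                       /* 1920000 / 2800 = 685  */
        limit *= 1000;                        /* 685000 kHz            */
        limit += f_min * 1000;                /* 1885000 kHz           */

        printf("deadband limit = %u kHz\n", limit);  /* ~1.9 GHz */
        return 0;
}

Any target frequency below that limit is clamped to policy->min, and the freqs.new == freqs.old early return then suppresses the transition entirely — which is where the reduction in frequency changes comes from.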
diff --git a/patches.suse/perf-core-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/perf-core-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..4be7a8bfd5
--- /dev/null
+++ b/patches.suse/perf-core-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,86 @@
+From 1ea1860f53d2b1ef7e0faaea46829776a3297607 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:24 +0100
+Subject: [PATCH] perf/core: Use lockdep to assert IRQs are disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: 164446455a5d3f1402b5a0ea42acce33fd576ed7
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-9-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/events/core.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 78de4fed1890..ffbdeedf77b2 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -209,7 +209,7 @@ static int event_function(void *info)
+ struct perf_event_context *task_ctx = cpuctx->task_ctx;
+ int ret = 0;
+
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ perf_ctx_lock(cpuctx, task_ctx);
+ /*
+@@ -306,7 +306,7 @@ static void event_function_local(struct perf_event *event, event_f func, void *d
+ struct task_struct *task = READ_ONCE(ctx->task);
+ struct perf_event_context *task_ctx = NULL;
+
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ if (task) {
+ if (task == TASK_TOMBSTONE)
+@@ -1008,7 +1008,7 @@ static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
+ struct perf_cpu_context *cpuctx;
+ int rotations = 0;
+
+- WARN_ON(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
+ rotations = perf_rotate_context(cpuctx);
+@@ -1095,7 +1095,7 @@ static void perf_event_ctx_activate(struct perf_event_context *ctx)
+ {
+ struct list_head *head = this_cpu_ptr(&active_ctx_list);
+
+- WARN_ON(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ WARN_ON(!list_empty(&ctx->active_ctx_list));
+
+@@ -1104,7 +1104,7 @@ static void perf_event_ctx_activate(struct perf_event_context *ctx)
+
+ static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
+ {
+- WARN_ON(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ WARN_ON(list_empty(&ctx->active_ctx_list));
+
+@@ -3477,7 +3477,7 @@ void perf_event_task_tick(void)
+ struct perf_event_context *ctx, *tmp;
+ int throttled;
+
+- WARN_ON(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ __this_cpu_inc(perf_throttled_seq);
+ throttled = __this_cpu_xchg(perf_throttled_count, 0);
diff --git a/patches.suse/rcu-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/rcu-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..a8bdbffb7a
--- /dev/null
+++ b/patches.suse/rcu-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,53 @@
+From 1831b77dd44a8f638d78e23d9a6f2dcfe14d9ca5 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:30 +0100
+Subject: [PATCH] rcu: Use lockdep to assert IRQs are disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: b04db8e19fc2e9131524dec43057c1b96d5ba3ba
+
+Backport note: This is a very limited backport. The upstream patches
+ that added most of the assertions that this commit fixes were
+ not backported.
+
+Lockdep now has an integrated IRQs disabled/enabled sanity check. Just
+use it instead of the ad-hoc RCU version.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-15-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/rcu/tree.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 6354641c2f8f..41b052849555 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -899,7 +899,7 @@ void rcu_irq_exit(void)
+ {
+ struct rcu_dynticks *rdtp;
+
+- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
++ lockdep_assert_irqs_disabled();
+ rdtp = this_cpu_ptr(&rcu_dynticks);
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+ rdtp->dynticks_nesting < 1);
+@@ -1031,7 +1031,7 @@ void rcu_irq_enter(void)
+ struct rcu_dynticks *rdtp;
+ long long oldval;
+
+- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
++ lockdep_assert_irqs_disabled();
+ rdtp = this_cpu_ptr(&rcu_dynticks);
+ oldval = rdtp->dynticks_nesting;
+ rdtp->dynticks_nesting++;
diff --git a/patches.suse/sched-clock-sched-cputime-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/sched-clock-sched-cputime-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..548f5946c2
--- /dev/null
+++ b/patches.suse/sched-clock-sched-cputime-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,57 @@
+From 847e0634979f7f03e633d72fc234aca15ba751d9 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:27 +0100
+Subject: [PATCH] sched/clock, sched/cputime: Use lockdep to assert IRQs are
+ disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: 2c11dba00a39007b457c7607c1b1a4db95ca04bc
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-12-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/clock.c | 2 +-
+ kernel/sched/cputime.c | 3 +--
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
+index ca0f8fc945c6..e086babe6c61 100644
+--- a/kernel/sched/clock.c
++++ b/kernel/sched/clock.c
+@@ -388,7 +388,7 @@ void sched_clock_tick(void)
+ if (unlikely(!sched_clock_running))
+ return;
+
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ scd = this_scd();
+ __scd_stamp(scd);
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index ad275b25e3f9..57507c85bd2c 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -259,8 +259,7 @@ static inline u64 account_other_time(u64 max)
+ {
+ u64 accounted;
+
+- /* Shall be converted to a lockdep-enabled lightweight check */
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ accounted = steal_account_process_time(max);
+
diff --git a/patches.suse/sched-idle-Micro-optimize-the-idle-loop.patch b/patches.suse/sched-idle-Micro-optimize-the-idle-loop.patch
new file mode 100644
index 0000000000..cd5bcf0e4f
--- /dev/null
+++ b/patches.suse/sched-idle-Micro-optimize-the-idle-loop.patch
@@ -0,0 +1,84 @@
+From ef688415e32bc0cea19b9b4d55a1f6dbe3634216 Mon Sep 17 00:00:00 2001
+From: Cheng Jian <cj.chengjian@huawei.com>
+Date: Wed, 25 Oct 2017 19:28:27 +0800
+Subject: [PATCH] sched/idle: Micro-optimize the idle loop
+
+References: bnc#978907 Scheduler performance -- idle
+Patch-mainline: v4.15
+Git-commit: 54b933c6c954a8b7b0c2b40a1c4d3f7279d11e22
+
+Move the loop-invariant calculation of 'cpu' in do_idle() out of the loop body,
+because the current CPU is always constant.
+
+This improves the generated code both on x86-64 and ARM64:
+
+x86-64:
+
+Before patch (execution in loop):
+ 864: 0f ae e8 lfence
+ 867: 65 8b 05 c2 38 f1 7e mov %gs:0x7ef138c2(%rip),%eax
+ 86e: 89 c0 mov %eax,%eax
+ 870: 48 0f a3 05 68 19 08 bt %rax,0x1081968(%rip)
+ 877: 01
+
+After patch (execution in loop):
+ 872: 0f ae e8 lfence
+ 875: 4c 0f a3 25 63 19 08 bt %r12,0x1081963(%rip)
+ 87c: 01
+
+ARM64:
+
+Before patch (execution in loop):
+ c58: d5033d9f dsb ld
+ c5c: d538d080 mrs x0, tpidr_el1
+ c60: b8606a61 ldr w1, [x19,x0]
+ c64: 1100fc20 add w0, w1, #0x3f
+ c68: 7100003f cmp w1, #0x0
+ c6c: 1a81b000 csel w0, w0, w1, lt
+ c70: 13067c00 asr w0, w0, #6
+ c74: 93407c00 sxtw x0, w0
+ c78: f8607a80 ldr x0, [x20,x0,lsl #3]
+ c7c: 9ac12401 lsr x1, x0, x1
+ c80: 36000581 tbz w1, #0, d30 <do_idle+0x128>
+
+After patch (execution in loop):
+ c84: d5033d9f dsb ld
+ c88: f9400260 ldr x0, [x19]
+ c8c: ea14001f tst x0, x20
+ c90: 54000580 b.eq d40 <do_idle+0x138>
+
+Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
+[ Rewrote the title and the changelog. ]
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: huawei.libin@huawei.com
+Cc: xiexiuqi@huawei.com
+Link: http://lkml.kernel.org/r/1508930907-107755-1-git-send-email-cj.chengjian@huawei.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/idle.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index ef63adce0c9c..44c3b46a57ca 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -209,6 +209,7 @@ static void cpuidle_idle_call(void)
+ */
+ static void do_idle(void)
+ {
++ int cpu = smp_processor_id();
+ /*
+ * If the arch has a polling bit, we maintain an invariant:
+ *
+@@ -225,7 +226,7 @@ static void do_idle(void)
+ check_pgt_cache();
+ rmb();
+
+- if (cpu_is_offline(smp_processor_id())) {
++ if (cpu_is_offline(cpu)) {
+ cpuhp_report_idle_dead();
+ arch_cpu_idle_dead();
+ }
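The change above is plain loop-invariant hoisting: the idle task is per-CPU and never migrates, so smp_processor_id() is constant for the whole loop and can be evaluated once. A generic sketch of the transform, with all helpers stubbed purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for smp_processor_id(), cpu_is_offline(), etc. */
static int  current_cpu(void)    { return 3; }
static bool cpu_offline(int cpu) { return cpu < 0; }
static bool should_stop(int n)   { return n >= 5; }
static void idle_once(void)      { puts("idle"); }

static void idle_loop(void)
{
        int cpu = current_cpu();   /* hoisted: evaluated once, not per pass */
        int n;

        for (n = 0; !should_stop(n); n++) {
                /* was: if (cpu_offline(current_cpu())) ... */
                if (cpu_offline(cpu))
                        break;
                idle_once();
        }
}

int main(void)
{
        idle_loop();
        return 0;
}

As the disassembly in the changelog shows, the compiler then keeps the CPU id in a register instead of re-reading per-CPU state on every iteration.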
diff --git a/patches.suse/smp-core-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/smp-core-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..453629cbaf
--- /dev/null
+++ b/patches.suse/smp-core-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,41 @@
+From 9214352305214b85eceffaffb0a3a9a11546dce5 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:22 +0100
+Subject: [PATCH] smp/core: Use lockdep to assert IRQs are disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: 83efcbd028ad3aec36b5a3882cfa32490c135df7
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-7-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/smp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 81cfca9b4cc3..b69dff67a09b 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -213,7 +213,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
+ call_single_data_t *csd, *csd_next;
+ static bool warned;
+
+- WARN_ON(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ head = this_cpu_ptr(&call_single_queue);
+ entry = llist_del_all(head);
diff --git a/patches.suse/timers-hrtimer-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/timers-hrtimer-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..018ad994d0
--- /dev/null
+++ b/patches.suse/timers-hrtimer-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,44 @@
+From b4f5889a837be8b8e15947a8ca6131eef6fc9a93 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:21 +0100
+Subject: [PATCH] timers/hrtimer: Use lockdep to assert IRQs are
+ disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: 53bef3fd47f69e40b52c9f9acd3551dfff9f8702
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-6-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/time/hrtimer.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index ac053bb5296e..82a06d25e644 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -757,9 +757,7 @@ void clock_was_set(void)
+ */
+ void hrtimers_resume(void)
+ {
+- WARN_ONCE(!irqs_disabled(),
+- KERN_INFO "hrtimers_resume() called with IRQs enabled!");
+-
++ lockdep_assert_irqs_disabled();
+ /* Retrigger on the local CPU */
+ retrigger_next_event(NULL);
+ /* And schedule a retrigger for all others */
diff --git a/patches.suse/timers-nohz-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/timers-nohz-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..ab3d2427c4
--- /dev/null
+++ b/patches.suse/timers-nohz-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,51 @@
+From a804c1d8a43c03c1e82c2beec75c4ce74496cf82 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:20 +0100
+Subject: [PATCH] timers/nohz: Use lockdep to assert IRQs are disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: ebf3adbad012b89c4a51a3beae718a587d988a3a
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-5-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/time/tick-sched.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 9f74c7cfccaa..0bafeaa6caca 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -192,7 +192,7 @@ static bool check_tick_dependency(atomic_t *dep)
+
+ static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
+ {
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ if (unlikely(!cpu_online(cpu)))
+ return false;
+@@ -944,8 +944,7 @@ void tick_nohz_idle_enter(void)
+ {
+ struct tick_sched *ts;
+
+- WARN_ON_ONCE(irqs_disabled());
+-
++ lockdep_assert_irqs_enabled();
+ /*
+ * Update the idle state in the scheduler domain hierarchy
+ * when tick_nohz_stop_sched_tick() is called from the idle loop.
diff --git a/patches.suse/timers-posix-cpu-timers-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/timers-posix-cpu-timers-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..085401d4c2
--- /dev/null
+++ b/patches.suse/timers-posix-cpu-timers-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,60 @@
+From 103067fb809d78047e2c2663a237bf9bc6cf320e Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:28 +0100
+Subject: [PATCH] timers/posix-cpu-timers: Use lockdep to assert IRQs are
+ disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: a69682200db9c2c26594188f81dd2df560af4683
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-13-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/time/posix-cpu-timers.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index d2a1e6dd0291..29e695f174f2 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -590,7 +590,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
+ /*
+ * Disarm any old timer after extracting its expiry time.
+ */
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ ret = 0;
+ old_incr = timer->it.cpu.incr;
+@@ -1027,7 +1027,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
+ /*
+ * Now re-arm for the new expiry time.
+ */
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+ arm_timer(timer);
+ unlock_task_sighand(p, &flags);
+
+@@ -1122,7 +1122,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
+ struct k_itimer *timer, *next;
+ unsigned long flags;
+
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ /*
+ * The fast path checks that there are no expired thread or thread
diff --git a/patches.suse/workqueue-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/workqueue-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..10fcf831b8
--- /dev/null
+++ b/patches.suse/workqueue-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,41 @@
+From 33221d77e1c5b8c375d0796c0c3e3809b2fabb0a Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:19 +0100
+Subject: [PATCH] workqueue: Use lockdep to assert IRQs are disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: 8e8eb730759f9cfd7a761b0b4ee41d714e720993
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Tejun Heo <tj@kernel.org>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-4-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/workqueue.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 7146ea70a62d..1b8cdd661a7f 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1375,7 +1375,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ * queued or lose PENDING. Grabbing PENDING and queueing should
+ * happen with IRQ disabled.
+ */
+- WARN_ON_ONCE(!irqs_disabled());
++ lockdep_assert_irqs_disabled();
+
+ debug_work_activate(work);
+
diff --git a/patches.suse/x86-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch b/patches.suse/x86-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
new file mode 100644
index 0000000000..b6ccebaea4
--- /dev/null
+++ b/patches.suse/x86-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
@@ -0,0 +1,65 @@
+From 9badbabee2f555e9722e7e48a4312e5b5038ba82 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Mon, 6 Nov 2017 16:01:23 +0100
+Subject: [PATCH] x86: Use lockdep to assert IRQs are disabled/enabled
+
+References: bnc#1068202 Avoid irqs_disabled debugging checks in fast paths
+Patch-mainline: v4.15
+Git-commit: 7a10e2a9190628a4024ea394ce7bd641ae40ffd1
+
+Use lockdep to check that IRQs are enabled or disabled as expected. This
+way the sanity check only shows overhead when concurrency correctness
+debug code is enabled.
+
+It also makes no more sense to fix the IRQ flags when a bug is detected
+as the assertion is now pure config-dependent debugging. And to quote
+Peter Zijlstra:
+
+ The whole if !disabled, disable logic is uber paranoid programming,
+ but I don't think we've ever seen that WARN trigger, and if it does
+ (and then burns the kernel) we at least know what happend.
+
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: David S . Miller <davem@davemloft.net>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Link: http://lkml.kernel.org/r/1509980490-4285-8-git-send-email-frederic@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ arch/x86/entry/common.c | 4 +---
+ arch/x86/kernel/smpboot.c | 2 +-
+ 2 files changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index cdefcfdd9e63..58e639190aab 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -183,9 +183,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
+ struct thread_info *ti = current_thread_info();
+ u32 cached_flags;
+
+- if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
+- local_irq_disable();
+-
++ lockdep_assert_irqs_disabled();
+ lockdep_sys_exit();
+
+ cached_flags = READ_ONCE(ti->flags);
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 4899684fe181..4949ab5b8f34 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1097,7 +1097,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+ unsigned long flags;
+ int err;
+
+- WARN_ON(irqs_disabled());
++ lockdep_assert_irqs_enabled();
+
+ pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);
+
diff --git a/series.conf b/series.conf
index 3bb3b06050..776ac24c12 100644
--- a/series.conf
+++ b/series.conf
@@ -941,6 +941,13 @@
patches.suse/cpufreq-intel_pstate-use-setpoint-of-40-on-servers.patch
patches.suse/cpufreq-ondemand-set-default-up_threshold-to-40-on-multi-core-systems.patch
+ # Reintroduce the deadband effect for pcc-cpufreq only as the
+ # elimination increases the number of frequency changes with
+ # pcc-cpufreq. The hardware does not always report what the
+ # correct maximum switching rate is and if it's too quick then
+ # the results are sub-optimal and may be nonsense.
+ patches.suse/pcc-cpufreq-Re-introduce-deadband-effect-to-reduce-number-of-frequency-changes.patch
+
########################################################
# Scheduler / Core
########################################################
@@ -1021,6 +1028,22 @@
patches.suse/sched-fair-Enable-SIS_AVG_CPU-by-default.patch
+ # bnc#1068202 reduce expensive debugging checks from fast paths
+ patches.suse/sched-idle-Micro-optimize-the-idle-loop.patch
+ patches.suse/locking-lockdep-Add-IRQs-disabled-enabled-assertion-APIs-lockdep_assert_irqs_enabled-disabled.patch
+ patches.suse/irq-softirqs-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/workqueue-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/timers-nohz-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/timers-hrtimer-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/smp-core-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/x86-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/perf-core-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/irq_work-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/sched-clock-sched-cputime-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/timers-posix-cpu-timers-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/netpoll-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+ patches.suse/rcu-Use-lockdep-to-assert-IRQs-are-disabled-enabled.patch
+
########################################################
# locking/core
########################################################
@@ -1556,7 +1579,6 @@
patches.suse/mm-remove-__GFP_COLD.patch
patches.suse/mm-pagevec-rename-pagevec-drained-field.patch
patches.suse/mm-page_alloc-simplify-list-handling-in-rmqueue_bulk.patch
- patches.suse/mm-truncate-remove-all-exceptional-entries-from-pagevec-under-one-lock-fix.patch
patches.fixes/VFS-expedite-umount.patch