author     Davidlohr Bueso <dbueso@suse.de>  2016-06-14 10:05:46 -0700
committer  Davidlohr Bueso <dbueso@suse.de>  2016-06-14 10:05:46 -0700
commit     969a7a8a064f4fb8bfe0657e9dd3647d0dd0b693 (patch)
tree       edf0ec7cf6c8b001f40f39f0ce45e466b1dfd2d1
parent     09f90dac67567bc1b68e233b7b4cf497038b65fe (diff)
locking/rwsem: Optimize write lock by reducing operations in slowpath (bsc#969756).
-rw-r--r--  patches.suse/0001-locking-rwsem-Optimize-write-lock-by-reducing-operat.patch  93
-rw-r--r--  series.conf                                                                    1
2 files changed, 94 insertions(+), 0 deletions(-)
diff --git a/patches.suse/0001-locking-rwsem-Optimize-write-lock-by-reducing-operat.patch b/patches.suse/0001-locking-rwsem-Optimize-write-lock-by-reducing-operat.patch
new file mode 100644
index 0000000000..d189fd0c0b
--- /dev/null
+++ b/patches.suse/0001-locking-rwsem-Optimize-write-lock-by-reducing-operat.patch
@@ -0,0 +1,93 @@
+From c0fcb6c2d332041256dc55d8a1ec3c0a2d0befb8 Mon Sep 17 00:00:00 2001
+From: Jason Low <jason.low2@hpe.com>
+Date: Mon, 16 May 2016 17:38:00 -0700
+Subject: [PATCH] locking/rwsem: Optimize write lock by reducing operations in slowpath
+Git-commit: c0fcb6c2d332041256dc55d8a1ec3c0a2d0befb8
+Patch-mainline: Queued in -tip locking/core for v4.8
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
+References: bsc#969756
+
+When acquiring the rwsem write lock in the slowpath, we first try
+to cmpxchg the count from RWSEM_WAITING_BIAS to
+RWSEM_ACTIVE_WRITE_BIAS. When that succeeds, we then atomically
+add RWSEM_WAITING_BIAS back in cases where there are other tasks
+on the wait list. This causes write lock operations to often issue
+multiple atomic operations.
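+
+In simplified pseudocode (a sketch of the logic, not the exact
+kernel code), the old sequence is roughly:
+
+	/* Sketch of the old path: up to two atomic operations. */
+	if (cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
+			    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
+		if (!list_is_singular(&sem->wait_list))
+			/* second atomic operation on sem->count */
+			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+	}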
+
+We can instead do the list_is_singular() check first, and then
+set the count accordingly, so that we issue at most one atomic
+operation when acquiring the write lock, reducing unnecessary
+cacheline contention.
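+
+With the change, the final count value is chosen up front and a
+single cmpxchg() publishes it (again a simplified sketch of the
+logic in the patch below):
+
+	/* Sketch of the new path: at most one atomic operation. */
+	count = list_is_singular(&sem->wait_list) ?
+			RWSEM_ACTIVE_WRITE_BIAS :
+			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
+	if (cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count) ==
+	    RWSEM_WAITING_BIAS)
+		return true;	/* write lock acquired */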
+
+Signed-off-by: Jason Low <jason.low2@hpe.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Waiman Long <Waiman.Long@hpe.com>
+Acked-by: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+Cc: Jason Low <jason.low2@hp.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Matt Turner <mattst88@gmail.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Hurley <peter@hurleysoftware.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Richard Henderson <rth@twiddle.net>
+Cc: Terry Rudd <terry.rudd@hpe.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Link: http://lkml.kernel.org/r/1463445486-16078-2-git-send-email-jason.low2@hpe.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+
+---
+ kernel/locking/rwsem-xadd.c | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
+index fcbf75ac3dcb..b957da7fcb19 100644
+--- a/kernel/locking/rwsem-xadd.c
++++ b/kernel/locking/rwsem-xadd.c
+@@ -261,17 +261,28 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
+ }
+ EXPORT_SYMBOL(rwsem_down_read_failed);
+ 
++/*
++ * This function must be called with the sem->wait_lock held to prevent
++ * race conditions between checking the rwsem wait list and setting the
++ * sem->count accordingly.
++ */
+ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
+ {
+ 	/*
+-	 * Try acquiring the write lock. Check count first in order
+-	 * to reduce unnecessary expensive cmpxchg() operations.
++	 * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
+ 	 */
+-	if (count == RWSEM_WAITING_BIAS &&
+-	    cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
+-			    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
+-		if (!list_is_singular(&sem->wait_list))
+-			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
++	if (count != RWSEM_WAITING_BIAS)
++		return false;
++
++	/*
++	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
++	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
++	 */
++	count = list_is_singular(&sem->wait_list) ?
++			RWSEM_ACTIVE_WRITE_BIAS :
++			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
++
++	if (cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count) == RWSEM_WAITING_BIAS) {
+ 		rwsem_set_owner(sem);
+ 		return true;
+ 	}
+--
+2.6.6
+
diff --git a/series.conf b/series.conf
index f3db2f14b6..df1a878854 100644
--- a/series.conf
+++ b/series.conf
@@ -368,6 +368,7 @@
patches.suse/0001-locking-ww_mutex-Report-recursive-ww_mutex-locking-e.patch
patches.suse/0001-locking-mutex-Set-and-clear-owner-using-WRITE_ONCE.patch
patches.suse/0001-locking-rtmutex-Only-warn-once-on-a-trylock-from-bad.patch
+ patches.suse/0001-locking-rwsem-Optimize-write-lock-by-reducing-operat.patch
########################################################
# locking/rcu