Home > GIT Browse > SLE15-AZURE
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorTakashi Iwai <tiwai@suse.de>2019-09-07 22:31:10 +0200
committerTakashi Iwai <tiwai@suse.de>2019-09-07 22:31:10 +0200
commit6326afcc67f453674ee6b88511c1634309be5181 (patch)
tree2ebcfbf7c2552d956c43671e3bb64a2ae66df632
parent07498b9965f97a9fffc4352e50ee6e8190514665 (diff)
Revert patches.suse/0001-blk-wbt-Avoid-lock-contention-and-thundering-herd-is.patch (bsc#1141543)
As we see stalls / crashes recently with the relevant code path, revert this patch tentatively. suse-commit: c8f11b1b7dd8f3b39c606906c8509b45f699f85f
-rw-r--r--  block/blk-wbt.c  57
1 file changed, 32 insertions(+), 25 deletions(-)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 604039c56814..32ab0b6abe2a 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -157,7 +157,7 @@ void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
int diff = limit - inflight;
if (!inflight || diff >= rwb->wb_background / 2)
- wake_up(&rqw->wait);
+ wake_up_all(&rqw->wait);
}
}
@@ -502,6 +502,30 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
return limit;
}
+static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
+ wait_queue_entry_t *wait, unsigned long rw)
+{
+ /*
+ * inc it here even if disabled, since we'll dec it at completion.
+ * this only happens if the task was sleeping in __wbt_wait(),
+ * and someone turned it off at the same time.
+ */
+ if (!rwb_enabled(rwb)) {
+ atomic_inc(&rqw->inflight);
+ return true;
+ }
+
+ /*
+ * If the waitqueue is already active and we are not the next
+ * in line to be woken up, wait for our turn.
+ */
+ if (waitqueue_active(&rqw->wait) &&
+ rqw->wait.head.next != &wait->entry)
+ return false;
+
+ return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
+}
+
/*
* Block if we will exceed our limit, or if we are currently waiting for
* the timer to kick off queuing again.
@@ -511,32 +535,16 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
__acquires(lock)
{
struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
- DECLARE_WAITQUEUE(wait, current);
-
- /*
- * inc it here even if disabled, since we'll dec it at completion.
- * this only happens if the task was sleeping in __wbt_wait(),
- * and someone turned it off at the same time.
- */
- if (!rwb_enabled(rwb)) {
- atomic_inc(&rqw->inflight);
- return;
- }
+ DEFINE_WAIT(wait);
- if (!waitqueue_active(&rqw->wait)
- && atomic_inc_below(&rqw->inflight, get_limit(rwb, rw)))
+ if (may_queue(rwb, rqw, &wait, rw))
+ return;
- add_wait_queue_exclusive(&rqw->wait, &wait);
do {
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (!rwb_enabled(rwb)) {
- atomic_inc(&rqw->inflight);
- break;
- }
-
+ prepare_to_wait_exclusive(&rqw->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
- if (atomic_inc_below(&rqw->inflight, get_limit(rwb, rw)))
+ if (may_queue(rwb, rqw, &wait, rw))
break;
if (lock) {
@@ -547,8 +555,7 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
io_schedule();
} while (1);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&rqw->wait, &wait);
+ finish_wait(&rqw->wait, &wait);
}
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)