author    Johannes Thumshirn <jthumshirn@suse.de>  2016-06-03 11:28:15 +0200
committer Johannes Thumshirn <jthumshirn@suse.de>  2016-06-03 11:30:16 +0200
commit    84717abfa06e02fd44e979bd6af73b447b3f0bf8 (patch)
tree      ae99289d1d10ad993f8d64d93bd21489eef4d60b
parent    1f9d959b6862ed67e6bfe34f6573398ce6ecb2e0 (diff)
blk-mq: really fix plug list flushing for nomerge queues
-rw-r--r-- patches.drivers/0050-blk-mq-really-fix-plug-list-flushing-for-nomerge-queues.patch | 58
-rw-r--r-- series.conf                                                                        |  1
2 files changed, 59 insertions(+), 0 deletions(-)
diff --git a/patches.drivers/0050-blk-mq-really-fix-plug-list-flushing-for-nomerge-queues.patch b/patches.drivers/0050-blk-mq-really-fix-plug-list-flushing-for-nomerge-queues.patch
new file mode 100644
index 0000000000..4c20aacf4c
--- /dev/null
+++ b/patches.drivers/0050-blk-mq-really-fix-plug-list-flushing-for-nomerge-queues.patch
@@ -0,0 +1,58 @@
+From: Omar Sandoval <osandov@fb.com>
+Date: Wed, 1 Jun 2016 22:18:48 -0700
+Subject: blk-mq: really fix plug list flushing for nomerge queues
+Patch-mainline: Submitted, https://patchwork.kernel.org/patch/9149067/
+References: fate#319999
+
+Commit 0809e3ac6231 ("block: fix plug list flushing for nomerge queues")
+updated blk_mq_make_request() to set request_count even when
+blk_queue_nomerges() returns true. However, blk_mq_make_request() only
+does limited plugging and doesn't use request_count;
+blk_sq_make_request() is the one that should have been fixed. Do that
+and get rid of the unnecessary work in the mq version.
+
+Fixes: 0809e3ac6231 ("block: fix plug list flushing for nomerge queues")
+Signed-off-by: Omar Sandoval <osandov@fb.com>
+Reviewed-by: Ming Lei <tom.leiming@gmail.com>
+Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
+Acked-by: Johannes Thumshirn <jthumshirn@suse.de>
+---
+
+ block/blk-mq.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 29cbc1b5fbdb..f9b9049b1284 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
+
+ blk_queue_split(q, &bio, q->bio_split);
+
+- if (!is_flush_fua && !blk_queue_nomerges(q)) {
+- if (blk_attempt_plug_merge(q, bio, &request_count,
+- &same_queue_rq))
+- return BLK_QC_T_NONE;
+- } else
+- request_count = blk_plug_queued_count(q);
++ if (!is_flush_fua && !blk_queue_nomerges(q) &&
++ blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
++ return BLK_QC_T_NONE;
+
+ rq = blk_mq_map_request(q, bio, &data);
+ if (unlikely(!rq))
+@@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
+
+ blk_queue_split(q, &bio, q->bio_split);
+
+- if (!is_flush_fua && !blk_queue_nomerges(q) &&
+- blk_attempt_plug_merge(q, bio, &request_count, NULL))
+- return BLK_QC_T_NONE;
++ if (!is_flush_fua && !blk_queue_nomerges(q)) {
++ if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
++ return BLK_QC_T_NONE;
++ } else
++ request_count = blk_plug_queued_count(q);
+
+ rq = blk_mq_map_request(q, bio, &data);
+ if (unlikely(!rq))
diff --git a/series.conf b/series.conf
index eb429d5b00..2cd2688c99 100644
--- a/series.conf
+++ b/series.conf
@@ -4335,6 +4335,7 @@
patches.drivers/0047-blk-throttle-don-t-parse-cgroup-path-if-trace-isn-t-enabled.patch
patches.drivers/0048-blk-mq-fix-undefined-behaviour-in-order_to_size.patch
patches.drivers/0049-blk-mq-clear-q-mq_ops-if-init-fail.patch
+ patches.drivers/0050-blk-mq-really-fix-plug-list-flushing-for-nomerge-queues.patch
patches.drivers/dax-Call-get_blocks-with-create-1-for-write-faults-t.patch
patches.drivers/ext4-Handle-transient-ENOSPC-properly-for-DAX.patch
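Note on the change above: the commit message hinges on one difference between the two submission paths. The single-queue path (blk_sq_make_request) still consults request_count in its plugging logic even when merging is disabled, so it must fall back to blk_plug_queued_count(); the multi-queue path (blk_mq_make_request) only does limited plugging and never reads the count on that branch. The following is a minimal standalone sketch of that control flow, using simplified stand-in helpers rather than the real kernel functions, not the actual blk-mq implementation:

/*
 * Standalone sketch of the control flow this patch fixes; the helpers
 * below are hypothetical stand-ins, not the real kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static bool queue_nomerges;          /* models blk_queue_nomerges(q)     */
static unsigned int plugged_count;   /* models blk_plug_queued_count(q)  */

/* stand-in: try to merge the bio into an already-plugged request */
static bool attempt_plug_merge(unsigned int *request_count)
{
	*request_count = plugged_count;
	return false;                /* pretend nothing merged */
}

/* single-queue path: needs request_count even with merging disabled,
 * because the plugging logic consults it later */
static void sq_make_request(bool is_flush_fua)
{
	unsigned int request_count = 0;

	if (!is_flush_fua && !queue_nomerges) {
		if (attempt_plug_merge(&request_count))
			return;      /* merged, nothing more to do */
	} else {
		request_count = plugged_count;
	}

	printf("sq path: request_count = %u\n", request_count);
}

/* multi-queue path: only limited plugging, so the nomerge branch can
 * simply skip the merge attempt without computing a count */
static void mq_make_request(bool is_flush_fua)
{
	unsigned int request_count = 0;

	if (!is_flush_fua && !queue_nomerges &&
	    attempt_plug_merge(&request_count))
		return;

	printf("mq path: request_count = %u (unused when nomerges)\n",
	       request_count);
}

int main(void)
{
	queue_nomerges = true;
	plugged_count = 3;

	sq_make_request(false);      /* picks up plugged_count = 3 */
	mq_make_request(false);      /* leaves request_count at 0  */
	return 0;
}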