Home Home > GIT Browse
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Sterba <dsterba@suse.cz>2014-08-26 13:57:20 +0200
committerDavid Sterba <dsterba@suse.cz>2014-08-26 13:57:20 +0200
commitc40eb71a01a12fe4cb38646debb10e49fc2b2d0a (patch)
tree6ea15ee54ef2842b45ef3752d0fe121edd17793b
parent8fc81d38d8b63789858468f74d67d86ddb96d14e (diff)
Btrfs stable updates.
Pending or future 3.16.y fixes - Btrfs: read lock extent buffer while walking backrefs. - Btrfs: fix compressed write corruption on enospc. - Btrfs: fix csum tree corruption, duplicate and outdated checksums. - Btrfs: fill_holes: Fix slot number passed to hole_mergeable() call. - Btrfs: fix wrong write range for filemap_fdatawrite_range(). - Btrfs: fix wrong extent mapping for DirectIO. - btrfs: Return right extent when fiemap gives unaligned offset and len. - Btrfs: ensure tmpfile inode is always persisted with link count of 0. - Btrfs: don't monopolize a core when evicting inode. - Btrfs: fix task hang under heavy compressed write. - Refresh patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32.
-rw-r--r--patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-322
-rw-r--r--patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch30
-rw-r--r--patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch41
-rw-r--r--patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch104
-rw-r--r--patches.suse/btrfs-0011-fill_holes-Fix-slot-number-passed-to-hole_merg.patch37
-rw-r--r--patches.suse/btrfs-0016-fix-wrong-write-range-for-filemap_fdatawrite_r.patch30
-rw-r--r--patches.suse/btrfs-0017-fix-wrong-extent-mapping-for-DirectIO.patch39
-rw-r--r--patches.suse/btrfs-0018-Return-right-extent-when-fiemap-gives-unaligne.patch37
-rw-r--r--patches.suse/btrfs-0025-ensure-tmpfile-inode-is-always-persisted-with-.patch76
-rw-r--r--patches.suse/btrfs-0027-don-t-monopolize-a-core-when-evicting-inode.patch80
-rw-r--r--patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch567
-rw-r--r--series.conf10
12 files changed, 1052 insertions, 1 deletions
diff --git a/patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32 b/patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32
index ae163986e3..760a656763 100644
--- a/patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32
+++ b/patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32
@@ -75,7 +75,7 @@ Signed-off-by: Chris Mason <clm@fb.com>
if (ret < 0)
break;
if (!ret && extent_item_pos) {
-@@ -1008,9 +1007,9 @@ again:
+@@ -1011,9 +1010,9 @@ again:
goto out;
ref->inode_list = eie;
}
diff --git a/patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch b/patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch
new file mode 100644
index 0000000000..70b570ea4c
--- /dev/null
+++ b/patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch
@@ -0,0 +1,30 @@
+From: Filipe Manana <fdmanana@suse.com>
+Date: Wed, 2 Jul 2014 20:07:54 +0100
+Patch-mainline: 3.17
+Git-commit: 6f7ff6d7832c6be13e8c95598884dbc40ad69fb7
+Subject: [PATCH] Btrfs: read lock extent buffer while walking backrefs
+
+Before processing the extent buffer, acquire a read lock on it, so
+that we're safe against concurrent updates on the extent buffer.
+
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.cz>
+---
+ fs/btrfs/backref.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1001,8 +1001,11 @@ again:
+ ret = -EIO;
+ goto out;
+ }
++ btrfs_tree_read_lock(eb);
++ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ ret = find_extent_in_eb(eb, bytenr,
+ *extent_item_pos, &eie);
++ btrfs_tree_read_unlock_blocking(eb);
+ free_extent_buffer(eb);
+ if (ret < 0)
+ goto out;
diff --git a/patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch b/patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch
new file mode 100644
index 0000000000..c4e141f246
--- /dev/null
+++ b/patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch
@@ -0,0 +1,41 @@
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Thu, 24 Jul 2014 22:48:05 +0800
+Patch-mainline: 3.17
+Git-commit: ce62003f690dff38d3164a632ec69efa15c32cbf
+Subject: [PATCH] Btrfs: fix compressed write corruption on enospc
+
+When failing to allocate space for the whole compressed extent, we'll
+fallback to uncompressed IO, but we've forgotten to redirty the pages
+which belong to this compressed extent, and these 'clean' pages will
+simply skip 'submit' part and go to endio directly, at last we got data
+corruption as we write nothing.
+
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Tested-By: Martin Steigerwald <martin@lichtvoll.de>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.cz>
+---
+ fs/btrfs/inode.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -709,6 +709,18 @@ retry:
+ unlock_extent(io_tree, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1);
++
++ /*
++ * we need to redirty the pages if we decide to
++ * fallback to uncompressed IO, otherwise we
++ * will not submit these pages down to lower
++ * layers.
++ */
++ extent_range_redirty_for_io(inode,
++ async_extent->start,
++ async_extent->start +
++ async_extent->ram_size - 1);
++
+ goto retry;
+ }
+ goto out_free;
diff --git a/patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch b/patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch
new file mode 100644
index 0000000000..c72d87b47c
--- /dev/null
+++ b/patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch
@@ -0,0 +1,104 @@
+From: Filipe Manana <fdmanana@suse.com>
+Date: Sat, 9 Aug 2014 21:22:27 +0100
+Patch-mainline: 3.17
+Git-commit: 27b9a8122ff71a8cadfbffb9c4f0694300464f3b
+Subject: [PATCH] Btrfs: fix csum tree corruption, duplicate and outdated
+ checksums
+
+Under rare circumstances we can end up leaving 2 versions of a checksum
+for the same file extent range.
+
+The reason for this is that after calling btrfs_next_leaf we process
+slot 0 of the leaf it returns, instead of processing the slot set in
+path->slots[0]. Most of the time (by far) path->slots[0] is 0, but after
+btrfs_next_leaf() releases the path and before it searches for the next
+leaf, another task might cause a split of the next leaf, which migrates
+some of its keys to the leaf we were processing before calling
+btrfs_next_leaf(). In this case btrfs_next_leaf() returns again the
+same leaf but with path->slots[0] having a slot number corresponding
+to the first new key it got, that is, a slot number that didn't exist
+before calling btrfs_next_leaf(), as the leaf now has more keys than
+it had before. So we must really process the returned leaf starting at
+path->slots[0] always, as it isn't always 0, and the key at slot 0 can
+have an offset much lower than our search offset/bytenr.
+
+For example, consider the following scenario, where we have:
+
+sums->bytenr: 40157184, sums->len: 16384, sums end: 40173568
+four 4kb file data blocks with offsets 40157184, 40161280, 40165376, 40169472
+
+ Leaf N:
+
+ slot = 0 slot = btrfs_header_nritems() - 1
+ |-------------------------------------------------------------------|
+ | [(CSUM CSUM 39239680), size 8] ... [(CSUM CSUM 40116224), size 4] |
+ |-------------------------------------------------------------------|
+
+ Leaf N + 1:
+
+ slot = 0 slot = btrfs_header_nritems() - 1
+ |--------------------------------------------------------------------|
+ | [(CSUM CSUM 40161280), size 32] ... [((CSUM CSUM 40615936), size 8 |
+ |--------------------------------------------------------------------|
+
+Because we are at the last slot of leaf N, we call btrfs_next_leaf() to
+find the next highest key, which releases the current path and then searches
+for that next key. However after releasing the path and before finding that
+next key, the item at slot 0 of leaf N + 1 gets moved to leaf N, due to a call
+to ctree.c:push_leaf_left() (via ctree.c:split_leaf()), and therefore
+btrfs_next_leaf() will return us a path again with leaf N but with the slot
+pointing to its new last key (CSUM CSUM 40161280). This new version of leaf N
+is then:
+
+ slot = 0 slot = btrfs_header_nritems() - 2 slot = btrfs_header_nritems() - 1
+ |----------------------------------------------------------------------------------------------------|
+ | [(CSUM CSUM 39239680), size 8] ... [(CSUM CSUM 40116224), size 4] [(CSUM CSUM 40161280), size 32] |
+ |----------------------------------------------------------------------------------------------------|
+
+And incorrectly using slot 0, makes us set next_offset to 39239680 and we jump
+into the "insert:" label, which will set tmp to:
+
+ tmp = min((sums->len - total_bytes) >> blocksize_bits,
+ (next_offset - file_key.offset) >> blocksize_bits) =
+ min((16384 - 0) >> 12, (39239680 - 40157184) >> 12) =
+ min(4, (u64)-917504 = 18446744073708634112 >> 12) = 4
+
+and
+
+ ins_size = csum_size * tmp = 4 * 4 = 16 bytes.
+
+In other words, we insert a new csum item in the tree with key
+(CSUM_OBJECTID CSUM_KEY 40157184 = sums->bytenr) that contains the checksums
+for all the data (4 blocks of 4096 bytes each = sums->len). Which is wrong,
+because the item with key (CSUM CSUM 40161280) (the one that was moved from
+leaf N + 1 to the end of leaf N) contains the old checksums of the last 12288
+bytes of our data and won't get those old checksums removed.
+
+So this leaves us 2 different checksums for 3 4kb blocks of data in the tree,
+and breaks the logical rule:
+
+ Key_N+1.offset >= Key_N.offset + length_of_data_its_checksums_cover
+
+An obvious bad effect of this is that a subsequent csum tree lookup to get
+the checksum of any of the blocks with logical offset of 40161280, 40165376
+or 40169472 (the last 3 4kb blocks of file data), will get the old checksums.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.cz>
+---
+ fs/btrfs/file-item.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -756,7 +756,7 @@ again:
+ found_next = 1;
+ if (ret != 0)
+ goto insert;
+- slot = 0;
++ slot = path->slots[0];
+ }
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+ if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
diff --git a/patches.suse/btrfs-0011-fill_holes-Fix-slot-number-passed-to-hole_merg.patch b/patches.suse/btrfs-0011-fill_holes-Fix-slot-number-passed-to-hole_merg.patch
new file mode 100644
index 0000000000..9b9a67421b
--- /dev/null
+++ b/patches.suse/btrfs-0011-fill_holes-Fix-slot-number-passed-to-hole_merg.patch
@@ -0,0 +1,37 @@
+From: chandan <chandan@linux.vnet.ibm.com>
+Date: Tue, 1 Jul 2014 12:04:28 +0530
+Patch-mainline: 3.17
+Git-commit: 1707e26d6ab05c477a91d260e31fda7c6c38588e
+Subject: [PATCH] Btrfs: fill_holes: Fix slot number passed to
+ hole_mergeable() call.
+
+For a non-existent key, btrfs_search_slot() sets path->slots[0] to the slot
+where the key could have been present, which in this case would be the slot
+containing the extent item which would be the next neighbor of the file range
+being punched. The current code passes an incremented path->slots[0] and we
+skip to the wrong file extent item. This would mean that we would fail to
+merge the "yet to be created" hole with the next neighboring hole (if one
+exists). Fix this.
+
+Signed-off-by: Chandan Rajendra <chandan@linux.vnet.ibm.com>
+Reviewed-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.cz>
+---
+ fs/btrfs/file.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2112,10 +2112,9 @@ static int fill_holes(struct btrfs_trans
+ goto out;
+ }
+
+- if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
++ if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
+ u64 num_bytes;
+
+- path->slots[0]++;
+ key.offset = offset;
+ btrfs_set_item_key_safe(root, path, &key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
diff --git a/patches.suse/btrfs-0016-fix-wrong-write-range-for-filemap_fdatawrite_r.patch b/patches.suse/btrfs-0016-fix-wrong-write-range-for-filemap_fdatawrite_r.patch
new file mode 100644
index 0000000000..0d928a1ba8
--- /dev/null
+++ b/patches.suse/btrfs-0016-fix-wrong-write-range-for-filemap_fdatawrite_r.patch
@@ -0,0 +1,30 @@
+From: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Date: Thu, 17 Jul 2014 11:44:13 +0800
+Patch-mainline: 3.17
+Git-commit: 9a025a0860ccc0f02af153c966bc1f83e5d9fc62
+Subject: [PATCH] Btrfs: fix wrong write range for
+ filemap_fdatawrite_range()
+
+filemap_fdatawrite_range() expect the third arg to be @end
+not @len, fix it.
+
+Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Reviewed-by: David Sterba <dsterba@suse.cz>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.cz>
+---
+ fs/btrfs/inode.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7534,7 +7534,8 @@ static ssize_t btrfs_direct_IO(int rw, s
+ count = iov_iter_count(iter);
+ if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &BTRFS_I(inode)->runtime_flags))
+- filemap_fdatawrite_range(inode->i_mapping, offset, count);
++ filemap_fdatawrite_range(inode->i_mapping, offset,
++ offset + count - 1);
+
+ if (rw & WRITE) {
+ /*
diff --git a/patches.suse/btrfs-0017-fix-wrong-extent-mapping-for-DirectIO.patch b/patches.suse/btrfs-0017-fix-wrong-extent-mapping-for-DirectIO.patch
new file mode 100644
index 0000000000..3f0c661ea9
--- /dev/null
+++ b/patches.suse/btrfs-0017-fix-wrong-extent-mapping-for-DirectIO.patch
@@ -0,0 +1,39 @@
+From: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Date: Thu, 17 Jul 2014 11:44:14 +0800
+Patch-mainline: 3.17
+Git-commit: e2eca69dc6c09d968d69312b9899968a9b03a4a9
+Subject: [PATCH] Btrfs: fix wrong extent mapping for DirectIO
+
+btrfs_next_leaf() will use current leaf's last key to search
+and then return a bigger one. So it may still return a file extent
+item that is smaller than expected value and we will
+get an overflow here for @em->len.
+
+This is easy to reproduce for Btrfs Direct writing, it did not
+cause any problem, because writing will re-insert right mapping later.
+
+However, by hacking code to make DIO support compression, wrong extent
+mapping is kept and it encounter merging failure(EEXIST) quickly.
+
+Fix this problem by looping to find next file extent item that is bigger
+than @start or we could not find anything more.
+
+Signed-off-by: Wang Shilong <wangsl.fnst@cn.fujitsu.com>
+Reviewed-by: David Sterba <dsterba@suse.cz>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.cz>
+---
+ fs/btrfs/inode.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -6275,6 +6275,8 @@ next:
+ goto not_found;
+ if (start + len <= found_key.offset)
+ goto not_found;
++ if (start > found_key.offset)
++ goto next;
+ em->start = start;
+ em->orig_start = start;
+ em->len = found_key.offset - start;
diff --git a/patches.suse/btrfs-0018-Return-right-extent-when-fiemap-gives-unaligne.patch b/patches.suse/btrfs-0018-Return-right-extent-when-fiemap-gives-unaligne.patch
new file mode 100644
index 0000000000..9c390d8240
--- /dev/null
+++ b/patches.suse/btrfs-0018-Return-right-extent-when-fiemap-gives-unaligne.patch
@@ -0,0 +1,37 @@
+From: Qu Wenruo <quwenruo@cn.fujitsu.com>
+Date: Fri, 18 Jul 2014 09:55:43 +0800
+Patch-mainline: 3.17
+Git-commit: 2c91943b5066314a8bb9f0a65584e5e4cd92ea63
+Subject: [PATCH] btrfs: Return right extent when fiemap gives unaligned
+ offset and len.
+
+When page aligned start and len passed to extent_fiemap(), the result is
+good, but when start and len is not aligned, e.g. start = 1 and len =
+4095 is passed to extent_fiemap(), it returns no extent.
+
+The problem is that start and len is all rounded down which causes the
+problem. This patch will round down start and round up (start + len) to
+return right extent.
+
+Reported-by: Chandan Rajendra <chandan@linux.vnet.ibm.com>
+Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
+Reviewed-by: David Sterba <dsterba@suse.cz>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.cz>
+---
+ fs/btrfs/extent_io.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4213,8 +4213,8 @@ int extent_fiemap(struct inode *inode, s
+ return -ENOMEM;
+ path->leave_spinning = 1;
+
+- start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
+- len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
++ start = round_down(start, BTRFS_I(inode)->root->sectorsize);
++ len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
+
+ /*
+ * lookup the last file extent. We're not using i_size here
diff --git a/patches.suse/btrfs-0025-ensure-tmpfile-inode-is-always-persisted-with-.patch b/patches.suse/btrfs-0025-ensure-tmpfile-inode-is-always-persisted-with-.patch
new file mode 100644
index 0000000000..91c211195a
--- /dev/null
+++ b/patches.suse/btrfs-0025-ensure-tmpfile-inode-is-always-persisted-with-.patch
@@ -0,0 +1,76 @@
+From: Filipe Manana <fdmanana@suse.com>
+Date: Fri, 1 Aug 2014 00:10:32 +0100
+Patch-mainline: 3.17
+Git-commit: 5762b5c958abbecb7fb9f4596a6476d1ce91ecf6
+Subject: [PATCH] Btrfs: ensure tmpfile inode is always persisted with
+ link count of 0
+
+If we open a file with O_TMPFILE, don't do any further operation on
+it (so that the inode item isn't updated) and then force a transaction
+commit, we get a persisted inode item with a link count of 1, and not 0
+as it should be.
+
+Steps to reproduce it (requires a modern xfs_io with -T support):
+
+ $ mkfs.btrfs -f /dev/sdd
+ $ mount -o /dev/sdd /mnt
+ $ xfs_io -T /mnt &
+ $ sync
+
+Then btrfs-debug-tree shows the inode item with a link count of 1:
+
+ $ btrfs-debug-tree /dev/sdd
+ (...)
+ fs tree key (FS_TREE ROOT_ITEM 0)
+ leaf 29556736 items 4 free space 15851 generation 6 owner 5
+ fs uuid f164d01b-1b92-481d-a4e4-435fb0f843d0
+ chunk uuid 0e3d0e56-bcca-4a1c-aa5f-cec2c6f4f7a6
+ item 0 key (256 INODE_ITEM 0) itemoff 16123 itemsize 160
+ inode generation 3 transid 6 size 0 block group 0 mode 40755 links 1
+ item 1 key (256 INODE_REF 256) itemoff 16111 itemsize 12
+ inode ref index 0 namelen 2 name: ..
+ item 2 key (257 INODE_ITEM 0) itemoff 15951 itemsize 160
+ inode generation 6 transid 6 size 0 block group 0 mode 100600 links 1
+ item 3 key (ORPHAN ORPHAN_ITEM 257) itemoff 15951 itemsize 0
+ orphan item
+ checksum tree key (CSUM_TREE ROOT_ITEM 0)
+ (...)
+
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.cz>
+---
+ fs/btrfs/inode.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5606,6 +5606,13 @@ static struct inode *btrfs_new_inode(str
+ }
+
+ /*
++ * O_TMPFILE, set link count to 0, so that after this point,
++ * we fill in an inode item with the correct link count.
++ */
++ if (!name)
++ set_nlink(inode, 0);
++
++ /*
+ * we have to initialize this early, so we can reclaim the inode
+ * number if we fail afterwards in this function.
+ */
+@@ -9013,6 +9020,14 @@ static int btrfs_tmpfile(struct inode *d
+ if (ret)
+ goto out;
+
++ /*
++ * We set number of links to 0 in btrfs_new_inode(), and here we set
++ * it to 1 because d_tmpfile() will issue a warning if the count is 0,
++ * through:
++ *
++ * d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
++ */
++ set_nlink(inode, 1);
+ d_tmpfile(dentry, inode);
+ mark_inode_dirty(inode);
+
diff --git a/patches.suse/btrfs-0027-don-t-monopolize-a-core-when-evicting-inode.patch b/patches.suse/btrfs-0027-don-t-monopolize-a-core-when-evicting-inode.patch
new file mode 100644
index 0000000000..4930d92d34
--- /dev/null
+++ b/patches.suse/btrfs-0027-don-t-monopolize-a-core-when-evicting-inode.patch
@@ -0,0 +1,80 @@
+From: Filipe Manana <fdmanana@suse.com>
+Date: Fri, 8 Aug 2014 02:47:05 +0100
+Patch-mainline: 3.17
+Git-commit: 7064dd5c36187725e7ccfd837e07678ae435d3f5
+Subject: [PATCH] Btrfs: don't monopolize a core when evicting inode
+
+If an inode has a very large number of extent maps, we can spend
+a lot of time freeing them, which triggers a soft lockup warning.
+Therefore reschedule if we need to when freeing the extent maps
+while evicting the inode.
+
+I could trigger this all the time by running xfstests/generic/299 on
+a file system with the no-holes feature enabled. That test creates
+an inode with 11386677 extent maps.
+
+ $ mkfs.btrfs -f -O no-holes $TEST_DEV
+ $ MKFS_OPTIONS="-O no-holes" ./check generic/299
+ generic/299 382s ...
+ Message from syslogd@debian-vm3 at Aug 7 10:44:29 ...
+ kernel:[85304.208017] BUG: soft lockup - CPU#0 stuck for 22s! [umount:25330]
+ 384s
+ Ran: generic/299
+ Passed all 1 tests
+
+ $ dmesg
+ (...)
+ [86304.300017] BUG: soft lockup - CPU#0 stuck for 23s! [umount:25330]
+ (...)
+ [86304.300036] Call Trace:
+ [86304.300036] [<ffffffff81698ba9>] __slab_free+0x54/0x295
+ [86304.300036] [<ffffffffa02ee9cc>] ? free_extent_map+0x5c/0xb0 [btrfs]
+ [86304.300036] [<ffffffff811a6cd2>] kmem_cache_free+0x282/0x2a0
+ [86304.300036] [<ffffffffa02ee9cc>] free_extent_map+0x5c/0xb0 [btrfs]
+ [86304.300036] [<ffffffffa02e3775>] btrfs_evict_inode+0xd5/0x660 [btrfs]
+ [86304.300036] [<ffffffff811e7c8d>] ? __inode_wait_for_writeback+0x6d/0xc0
+ [86304.300036] [<ffffffff816a389b>] ? _raw_spin_unlock+0x2b/0x40
+ [86304.300036] [<ffffffff811d8cbb>] evict+0xab/0x180
+ [86304.300036] [<ffffffff811d8dce>] dispose_list+0x3e/0x60
+ [86304.300036] [<ffffffff811d9b04>] evict_inodes+0xf4/0x110
+ [86304.300036] [<ffffffff811bd953>] generic_shutdown_super+0x53/0x110
+ [86304.300036] [<ffffffff811bdaa6>] kill_anon_super+0x16/0x30
+ [86304.300036] [<ffffffffa02a78ba>] btrfs_kill_super+0x1a/0xa0 [btrfs]
+ [86304.300036] [<ffffffff811bd3a9>] deactivate_locked_super+0x59/0x80
+ [86304.300036] [<ffffffff811be44e>] deactivate_super+0x4e/0x70
+ [86304.300036] [<ffffffff811dec14>] mntput_no_expire+0x174/0x1f0
+ [86304.300036] [<ffffffff811deab7>] ? mntput_no_expire+0x17/0x1f0
+ [86304.300036] [<ffffffff811e0517>] SyS_umount+0x97/0x100
+ (...)
+
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: Satoru Takeuchi <takeuchi_satoru@jp.fujitsu.com>
+Tested-by: Satoru Takeuchi <takeuchi_satoru@jp.fujitsu.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.cz>
+---
+ fs/btrfs/inode.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4674,6 +4674,11 @@ static void evict_inode_truncate_pages(s
+ clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
+ remove_extent_mapping(map_tree, em);
+ free_extent_map(em);
++ if (need_resched()) {
++ write_unlock(&map_tree->lock);
++ cond_resched();
++ write_lock(&map_tree->lock);
++ }
+ }
+ write_unlock(&map_tree->lock);
+
+@@ -4696,6 +4701,7 @@ static void evict_inode_truncate_pages(s
+ &cached_state, GFP_NOFS);
+ free_extent_state(state);
+
++ cond_resched();
+ spin_lock(&io_tree->lock);
+ }
+ spin_unlock(&io_tree->lock);
diff --git a/patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch b/patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch
new file mode 100644
index 0000000000..7baa506a64
--- /dev/null
+++ b/patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch
@@ -0,0 +1,567 @@
+From: Liu Bo <bo.li.liu@oracle.com>
+Date: Fri, 15 Aug 2014 23:36:53 +0800
+Patch-mainline: 3.17
+Git-commit: 9e0af23764344f7f1b68e4eefbe7dc865018b63d
+Subject: [PATCH] Btrfs: fix task hang under heavy compressed write
+
+This has been reported and discussed for a long time, and this hang occurs in
+both 3.15 and 3.16.
+
+Btrfs now migrates to use kernel workqueue, but it introduces this hang problem.
+
+Btrfs has a kind of work queued as an ordered way, which means that its
+ordered_func() must be processed in the way of FIFO, so it usually looks like --
+
+normal_work_helper(arg)
+ work = container_of(arg, struct btrfs_work, normal_work);
+
+ work->func() <---- (we name it work X)
+ for ordered_work in wq->ordered_list
+ ordered_work->ordered_func()
+ ordered_work->ordered_free()
+
+The hang is a rare case, first when we find free space, we get an uncached block
+group, then we go to read its free space cache inode for free space information,
+so it will
+
+file a readahead request
+ btrfs_readpages()
+ for page that is not in page cache
+ __do_readpage()
+ submit_extent_page()
+ btrfs_submit_bio_hook()
+ btrfs_bio_wq_end_io()
+ submit_bio()
+ end_workqueue_bio() <--(ret by the 1st endio)
+ queue a work(named work Y) for the 2nd
+ also the real endio()
+
+So the hang occurs when work Y's work_struct and work X's work_struct happens
+to share the same address.
+
+A bit more explanation,
+
+A,B,C -- struct btrfs_work
+arg -- struct work_struct
+
+kthread:
+worker_thread()
+ pick up a work_struct from @worklist
+ process_one_work(arg)
+ worker->current_work = arg; <-- arg is A->normal_work
+ worker->current_func(arg)
+ normal_work_helper(arg)
+ A = container_of(arg, struct btrfs_work, normal_work);
+
+ A->func()
+ A->ordered_func()
+ A->ordered_free() <-- A gets freed
+
+ B->ordered_func()
+ submit_compressed_extents()
+ find_free_extent()
+ load_free_space_inode()
+ ... <-- (the above readahead stack)
+ end_workqueue_bio()
+ btrfs_queue_work(work C)
+ B->ordered_free()
+
+As if work A has a high priority in wq->ordered_list and there are more ordered
+works queued after it, such as B->ordered_func(), its memory could have been
+freed before normal_work_helper() returns, which means that kernel workqueue
+code worker_thread() still has worker->current_work pointer to be work
+A->normal_work's, ie. arg's address.
+
+Meanwhile, work C is allocated after work A is freed, work C->normal_work
+and work A->normal_work are likely to share the same address(I confirmed this
+with ftrace output, so I'm not just guessing, it's rare though).
+
+When another kthread picks up work C->normal_work to process, and finds our
+kthread is processing it(see find_worker_executing_work()), it'll think
+work C as a collision and skip then, which ends up nobody processing work C.
+
+So the situation is that our kthread is waiting forever on work C.
+
+Besides, there're other cases that can lead to deadlock, but the real problem
+is that all btrfs workqueue shares one work->func, -- normal_work_helper,
+so this makes each workqueue have its own helper function, but only a
+wrapper of normal_work_helper.
+
+With this patch, I no longer hit the above hang.
+
+Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
+Signed-off-by: Chris Mason <clm@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.cz>
+---
+ fs/btrfs/async-thread.c | 44 +++++++++++++++++++++++++++++++--------
+ fs/btrfs/async-thread.h | 28 +++++++++++++++++++++++-
+ fs/btrfs/delayed-inode.c | 4 +--
+ fs/btrfs/disk-io.c | 53 +++++++++++++++++++++++++----------------------
+ fs/btrfs/extent-tree.c | 7 +++---
+ fs/btrfs/inode.c | 35 ++++++++++++++++++++-----------
+ fs/btrfs/ordered-data.c | 1
+ fs/btrfs/qgroup.c | 1
+ fs/btrfs/raid56.c | 9 +++++--
+ fs/btrfs/reada.c | 3 +-
+ fs/btrfs/scrub.c | 14 +++++++-----
+ fs/btrfs/volumes.c | 3 +-
+ 12 files changed, 141 insertions(+), 61 deletions(-)
+
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -22,7 +22,6 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/freezer.h>
+-#include <linux/workqueue.h>
+ #include "async-thread.h"
+ #include "ctree.h"
+
+@@ -55,8 +54,39 @@ struct btrfs_workqueue {
+ struct __btrfs_workqueue *high;
+ };
+
+-static inline struct __btrfs_workqueue
+-*__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
++static void normal_work_helper(struct btrfs_work *work);
++
++#define BTRFS_WORK_HELPER(name) \
++void btrfs_##name(struct work_struct *arg) \
++{ \
++ struct btrfs_work *work = container_of(arg, struct btrfs_work, \
++ normal_work); \
++ normal_work_helper(work); \
++}
++
++BTRFS_WORK_HELPER(worker_helper);
++BTRFS_WORK_HELPER(delalloc_helper);
++BTRFS_WORK_HELPER(flush_delalloc_helper);
++BTRFS_WORK_HELPER(cache_helper);
++BTRFS_WORK_HELPER(submit_helper);
++BTRFS_WORK_HELPER(fixup_helper);
++BTRFS_WORK_HELPER(endio_helper);
++BTRFS_WORK_HELPER(endio_meta_helper);
++BTRFS_WORK_HELPER(endio_meta_write_helper);
++BTRFS_WORK_HELPER(endio_raid56_helper);
++BTRFS_WORK_HELPER(rmw_helper);
++BTRFS_WORK_HELPER(endio_write_helper);
++BTRFS_WORK_HELPER(freespace_write_helper);
++BTRFS_WORK_HELPER(delayed_meta_helper);
++BTRFS_WORK_HELPER(readahead_helper);
++BTRFS_WORK_HELPER(qgroup_rescan_helper);
++BTRFS_WORK_HELPER(extent_refs_helper);
++BTRFS_WORK_HELPER(scrub_helper);
++BTRFS_WORK_HELPER(scrubwrc_helper);
++BTRFS_WORK_HELPER(scrubnc_helper);
++
++static struct __btrfs_workqueue *
++__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
+ int thresh)
+ {
+ struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+@@ -232,13 +262,11 @@ static void run_ordered_work(struct __bt
+ spin_unlock_irqrestore(lock, flags);
+ }
+
+-static void normal_work_helper(struct work_struct *arg)
++static void normal_work_helper(struct btrfs_work *work)
+ {
+- struct btrfs_work *work;
+ struct __btrfs_workqueue *wq;
+ int need_order = 0;
+
+- work = container_of(arg, struct btrfs_work, normal_work);
+ /*
+ * We should not touch things inside work in the following cases:
+ * 1) after work->func() if it has no ordered_free
+@@ -262,7 +290,7 @@ static void normal_work_helper(struct wo
+ trace_btrfs_all_work_done(work);
+ }
+
+-void btrfs_init_work(struct btrfs_work *work,
++void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
+ btrfs_func_t func,
+ btrfs_func_t ordered_func,
+ btrfs_func_t ordered_free)
+@@ -270,7 +298,7 @@ void btrfs_init_work(struct btrfs_work *
+ work->func = func;
+ work->ordered_func = ordered_func;
+ work->ordered_free = ordered_free;
+- INIT_WORK(&work->normal_work, normal_work_helper);
++ INIT_WORK(&work->normal_work, uniq_func);
+ INIT_LIST_HEAD(&work->ordered_list);
+ work->flags = 0;
+ }
+--- a/fs/btrfs/async-thread.h
++++ b/fs/btrfs/async-thread.h
+@@ -19,12 +19,14 @@
+
+ #ifndef __BTRFS_ASYNC_THREAD_
+ #define __BTRFS_ASYNC_THREAD_
++#include <linux/workqueue.h>
+
+ struct btrfs_workqueue;
+ /* Internal use only */
+ struct __btrfs_workqueue;
+ struct btrfs_work;
+ typedef void (*btrfs_func_t)(struct btrfs_work *arg);
++typedef void (*btrfs_work_func_t)(struct work_struct *arg);
+
+ struct btrfs_work {
+ btrfs_func_t func;
+@@ -38,11 +40,35 @@ struct btrfs_work {
+ unsigned long flags;
+ };
+
++#define BTRFS_WORK_HELPER_PROTO(name) \
++void btrfs_##name(struct work_struct *arg)
++
++BTRFS_WORK_HELPER_PROTO(worker_helper);
++BTRFS_WORK_HELPER_PROTO(delalloc_helper);
++BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper);
++BTRFS_WORK_HELPER_PROTO(cache_helper);
++BTRFS_WORK_HELPER_PROTO(submit_helper);
++BTRFS_WORK_HELPER_PROTO(fixup_helper);
++BTRFS_WORK_HELPER_PROTO(endio_helper);
++BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
++BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
++BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
++BTRFS_WORK_HELPER_PROTO(rmw_helper);
++BTRFS_WORK_HELPER_PROTO(endio_write_helper);
++BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
++BTRFS_WORK_HELPER_PROTO(delayed_meta_helper);
++BTRFS_WORK_HELPER_PROTO(readahead_helper);
++BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper);
++BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
++BTRFS_WORK_HELPER_PROTO(scrub_helper);
++BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
++BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
++
+ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
+ int flags,
+ int max_active,
+ int thresh);
+-void btrfs_init_work(struct btrfs_work *work,
++void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
+ btrfs_func_t func,
+ btrfs_func_t ordered_func,
+ btrfs_func_t ordered_free);
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1395,8 +1395,8 @@ static int btrfs_wq_run_delayed_node(str
+ return -ENOMEM;
+
+ async_work->delayed_root = delayed_root;
+- btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root,
+- NULL, NULL);
++ btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
++ btrfs_async_run_delayed_root, NULL, NULL);
+ async_work->nr = nr;
+
+ btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -39,7 +39,6 @@
+ #include "btrfs_inode.h"
+ #include "volumes.h"
+ #include "print-tree.h"
+-#include "async-thread.h"
+ #include "locking.h"
+ #include "tree-log.h"
+ #include "free-space-cache.h"
+@@ -695,35 +694,41 @@ static void end_workqueue_bio(struct bio
+ {
+ struct end_io_wq *end_io_wq = bio->bi_private;
+ struct btrfs_fs_info *fs_info;
++ struct btrfs_workqueue *wq;
++ btrfs_work_func_t func;
+
+ fs_info = end_io_wq->info;
+ end_io_wq->error = err;
+- btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
+
+ if (bio->bi_rw & REQ_WRITE) {
+- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
+- btrfs_queue_work(fs_info->endio_meta_write_workers,
+- &end_io_wq->work);
+- else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
+- btrfs_queue_work(fs_info->endio_freespace_worker,
+- &end_io_wq->work);
+- else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+- btrfs_queue_work(fs_info->endio_raid56_workers,
+- &end_io_wq->work);
+- else
+- btrfs_queue_work(fs_info->endio_write_workers,
+- &end_io_wq->work);
++ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
++ wq = fs_info->endio_meta_write_workers;
++ func = btrfs_endio_meta_write_helper;
++ } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
++ wq = fs_info->endio_freespace_worker;
++ func = btrfs_freespace_write_helper;
++ } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
++ wq = fs_info->endio_raid56_workers;
++ func = btrfs_endio_raid56_helper;
++ } else {
++ wq = fs_info->endio_write_workers;
++ func = btrfs_endio_write_helper;
++ }
+ } else {
+- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+- btrfs_queue_work(fs_info->endio_raid56_workers,
+- &end_io_wq->work);
+- else if (end_io_wq->metadata)
+- btrfs_queue_work(fs_info->endio_meta_workers,
+- &end_io_wq->work);
+- else
+- btrfs_queue_work(fs_info->endio_workers,
+- &end_io_wq->work);
++ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
++ wq = fs_info->endio_raid56_workers;
++ func = btrfs_endio_raid56_helper;
++ } else if (end_io_wq->metadata) {
++ wq = fs_info->endio_meta_workers;
++ func = btrfs_endio_meta_helper;
++ } else {
++ wq = fs_info->endio_workers;
++ func = btrfs_endio_helper;
++ }
+ }
++
++ btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
++ btrfs_queue_work(wq, &end_io_wq->work);
+ }
+
+ /*
+@@ -830,7 +835,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_
+ async->submit_bio_start = submit_bio_start;
+ async->submit_bio_done = submit_bio_done;
+
+- btrfs_init_work(&async->work, run_one_async_start,
++ btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
+ run_one_async_done, run_one_async_free);
+
+ async->bio_flags = bio_flags;
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -552,7 +552,8 @@ static int cache_block_group(struct btrf
+ caching_ctl->block_group = cache;
+ caching_ctl->progress = cache->key.objectid;
+ atomic_set(&caching_ctl->count, 1);
+- btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
++ btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
++ caching_thread, NULL, NULL);
+
+ spin_lock(&cache->lock);
+ /*
+@@ -2749,8 +2750,8 @@ int btrfs_async_run_delayed_refs(struct
+ async->sync = 0;
+ init_completion(&async->wait);
+
+- btrfs_init_work(&async->work, delayed_ref_async_start,
+- NULL, NULL);
++ btrfs_init_work(&async->work, btrfs_extent_refs_helper,
++ delayed_ref_async_start, NULL, NULL);
+
+ btrfs_queue_work(root->fs_info->extent_workers, &async->work);
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1096,8 +1096,10 @@ static int cow_file_range_async(struct i
+ async_cow->end = cur_end;
+ INIT_LIST_HEAD(&async_cow->extents);
+
+- btrfs_init_work(&async_cow->work, async_cow_start,
+- async_cow_submit, async_cow_free);
++ btrfs_init_work(&async_cow->work,
++ btrfs_delalloc_helper,
++ async_cow_start, async_cow_submit,
++ async_cow_free);
+
+ nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
+ PAGE_CACHE_SHIFT;
+@@ -1881,7 +1883,8 @@ static int btrfs_writepage_start_hook(st
+
+ SetPageChecked(page);
+ page_cache_get(page);
+- btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
++ btrfs_init_work(&fixup->work, btrfs_fixup_helper,
++ btrfs_writepage_fixup_worker, NULL, NULL);
+ fixup->page = page;
+ btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
+ return -EBUSY;
+@@ -2822,7 +2825,8 @@ static int btrfs_writepage_end_io_hook(s
+ struct inode *inode = page->mapping->host;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_ordered_extent *ordered_extent = NULL;
+- struct btrfs_workqueue *workers;
++ struct btrfs_workqueue *wq;
++ btrfs_work_func_t func;
+
+ trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
+
+@@ -2831,13 +2835,17 @@ static int btrfs_writepage_end_io_hook(s
+ end - start + 1, uptodate))
+ return 0;
+
+- btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
++ if (btrfs_is_free_space_inode(inode)) {
++ wq = root->fs_info->endio_freespace_worker;
++ func = btrfs_freespace_write_helper;
++ } else {
++ wq = root->fs_info->endio_write_workers;
++ func = btrfs_endio_write_helper;
++ }
+
+- if (btrfs_is_free_space_inode(inode))
+- workers = root->fs_info->endio_freespace_worker;
+- else
+- workers = root->fs_info->endio_write_workers;
+- btrfs_queue_work(workers, &ordered_extent->work);
++ btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
++ NULL);
++ btrfs_queue_work(wq, &ordered_extent->work);
+
+ return 0;
+ }
+@@ -7173,7 +7181,8 @@ again:
+ if (!ret)
+ goto out_test;
+
+- btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL);
++ btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
++ finish_ordered_fn, NULL, NULL);
+ btrfs_queue_work(root->fs_info->endio_write_workers,
+ &ordered->work);
+ out_test:
+@@ -8542,7 +8551,9 @@ struct btrfs_delalloc_work *btrfs_alloc_
+ work->inode = inode;
+ work->wait = wait;
+ work->delay_iput = delay_iput;
+- btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
++ WARN_ON_ONCE(!inode);
++ btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
++ btrfs_run_delalloc_work, NULL, NULL);
+
+ return work;
+ }
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -627,6 +627,7 @@ int btrfs_wait_ordered_extents(struct bt
+ spin_unlock(&root->ordered_extent_lock);
+
+ btrfs_init_work(&ordered->flush_work,
++ btrfs_flush_delalloc_helper,
+ btrfs_run_ordered_extent_work, NULL, NULL);
+ list_add_tail(&ordered->work_list, &works);
+ btrfs_queue_work(root->fs_info->flush_workers,
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2551,6 +2551,7 @@ qgroup_rescan_init(struct btrfs_fs_info
+ memset(&fs_info->qgroup_rescan_work, 0,
+ sizeof(fs_info->qgroup_rescan_work));
+ btrfs_init_work(&fs_info->qgroup_rescan_work,
++ btrfs_qgroup_rescan_helper,
+ btrfs_qgroup_rescan_worker, NULL, NULL);
+
+ if (ret) {
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -1416,7 +1416,8 @@ cleanup:
+
+ static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
+ {
+- btrfs_init_work(&rbio->work, rmw_work, NULL, NULL);
++ btrfs_init_work(&rbio->work, btrfs_rmw_helper,
++ rmw_work, NULL, NULL);
+
+ btrfs_queue_work(rbio->fs_info->rmw_workers,
+ &rbio->work);
+@@ -1424,7 +1425,8 @@ static void async_rmw_stripe(struct btrf
+
+ static void async_read_rebuild(struct btrfs_raid_bio *rbio)
+ {
+- btrfs_init_work(&rbio->work, read_rebuild_work, NULL, NULL);
++ btrfs_init_work(&rbio->work, btrfs_rmw_helper,
++ read_rebuild_work, NULL, NULL);
+
+ btrfs_queue_work(rbio->fs_info->rmw_workers,
+ &rbio->work);
+@@ -1665,7 +1667,8 @@ static void btrfs_raid_unplug(struct blk
+ plug = container_of(cb, struct btrfs_plug_cb, cb);
+
+ if (from_schedule) {
+- btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
++ btrfs_init_work(&plug->work, btrfs_rmw_helper,
++ unplug_work, NULL, NULL);
+ btrfs_queue_work(plug->info->rmw_workers,
+ &plug->work);
+ return;
+--- a/fs/btrfs/reada.c
++++ b/fs/btrfs/reada.c
+@@ -798,7 +798,8 @@ static void reada_start_machine(struct b
+ /* FIXME we cannot handle this properly right now */
+ BUG();
+ }
+- btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
++ btrfs_init_work(&rmw->work, btrfs_readahead_helper,
++ reada_start_machine_worker, NULL, NULL);
+ rmw->fs_info = fs_info;
+
+ btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -428,8 +428,8 @@ struct scrub_ctx *scrub_setup_ctx(struct
+ sbio->index = i;
+ sbio->sctx = sctx;
+ sbio->page_count = 0;
+- btrfs_init_work(&sbio->work, scrub_bio_end_io_worker,
+- NULL, NULL);
++ btrfs_init_work(&sbio->work, btrfs_scrub_helper,
++ scrub_bio_end_io_worker, NULL, NULL);
+
+ if (i != SCRUB_BIOS_PER_SCTX - 1)
+ sctx->bios[i]->next_free = i + 1;
+@@ -999,8 +999,8 @@ nodatasum_case:
+ fixup_nodatasum->root = fs_info->extent_root;
+ fixup_nodatasum->mirror_num = failed_mirror_index + 1;
+ scrub_pending_trans_workers_inc(sctx);
+- btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum,
+- NULL, NULL);
++ btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
++ scrub_fixup_nodatasum, NULL, NULL);
+ btrfs_queue_work(fs_info->scrub_workers,
+ &fixup_nodatasum->work);
+ goto out;
+@@ -1616,7 +1616,8 @@ static void scrub_wr_bio_end_io(struct b
+ sbio->err = err;
+ sbio->bio = bio;
+
+- btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
++ btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
++ scrub_wr_bio_end_io_worker, NULL, NULL);
+ btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
+ }
+
+@@ -3203,7 +3204,8 @@ static int copy_nocow_pages(struct scrub
+ nocow_ctx->len = len;
+ nocow_ctx->mirror_num = mirror_num;
+ nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
+- btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL);
++ btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
++ copy_nocow_pages_worker, NULL, NULL);
+ INIT_LIST_HEAD(&nocow_ctx->inodes);
+ btrfs_queue_work(fs_info->scrub_nocow_workers,
+ &nocow_ctx->work);
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -5800,7 +5800,8 @@ struct btrfs_device *btrfs_alloc_device(
+ else
+ generate_random_uuid(dev->uuid);
+
+- btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL);
++ btrfs_init_work(&dev->work, btrfs_submit_helper,
++ pending_bios_fn, NULL, NULL);
+
+ return dev;
+ }
diff --git a/series.conf b/series.conf
index 405c863c5f..9a0ca3c4e3 100644
--- a/series.conf
+++ b/series.conf
@@ -251,7 +251,17 @@
########################################################
patches.suse/btrfs-use-correct-device-for-maps.patch
patches.suse/btrfs-check-index-in-extent_buffer_page
+ patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch
+ patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch
patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32
+ patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch
+ patches.suse/btrfs-0011-fill_holes-Fix-slot-number-passed-to-hole_merg.patch
+ patches.suse/btrfs-0016-fix-wrong-write-range-for-filemap_fdatawrite_r.patch
+ patches.suse/btrfs-0017-fix-wrong-extent-mapping-for-DirectIO.patch
+ patches.suse/btrfs-0018-Return-right-extent-when-fiemap-gives-unaligne.patch
+ patches.suse/btrfs-0025-ensure-tmpfile-inode-is-always-persisted-with-.patch
+ patches.suse/btrfs-0027-don-t-monopolize-a-core-when-evicting-inode.patch
+ patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch
########################################################
# Reiserfs Patches