author    Takashi Iwai <tiwai@suse.de>  2019-05-17 16:15:48 +0200
committer Takashi Iwai <tiwai@suse.de>  2019-05-17 16:15:48 +0200
commit    d88cfc3d427ce6e74df1be4eb26c4c09cf830de7
tree      4aa8ec819f05c574fb7025a9a0d6d41bcd7906a0
parent    97bd0d5adff96b35a4e0d2514365cb0b7aa1bd6d
parent    036fef73e273342b0c0bd8f428c4b048399fda8e

Merge branch 'users/wqu/SLE12-SP3/for-next' into SLE12-SP3

Pull btrfs fixes from Qu Wenruo (bsc#1134813)
-rw-r--r--  patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch  |  10
-rw-r--r--  patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch  |  33
-rw-r--r--  patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch         |  85
-rw-r--r--  patches.suse/0001-btrfs-remove-delayed_ref_node-from-ref_head.patch            | 723
-rw-r--r--  patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch        |  96
-rw-r--r--  patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch   |  69
-rw-r--r--  patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch   |  73
-rw-r--r--  patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch                   | 127
-rw-r--r--  patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch                   | 123
-rw-r--r--  patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch                  |  97
-rw-r--r--  patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch   | 100
-rw-r--r--  patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch   | 157
-rw-r--r--  patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch |  44
-rw-r--r--  series.conf                                                                   |   9
14 files changed, 1671 insertions(+), 75 deletions(-)
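
Editor's note: the thread tying this series together is that struct btrfs_delayed_ref_head stops embedding a struct btrfs_delayed_ref_node and instead carries its own identity and refcount, retired through a dedicated helper. A minimal C sketch of the shape change, reconstructed from the hunks below (field subset only; the _before/_after names are illustrative, not kernel identifiers):

	/* Before: the head piggybacked on an embedded node for bytenr,
	 * num_bytes and its reference count. */
	struct btrfs_delayed_ref_head_before {
		struct btrfs_delayed_ref_node node;
		struct mutex mutex;
		/* ... */
	};

	/* After: the head owns these fields directly. */
	struct btrfs_delayed_ref_head_after {
		u64 bytenr;
		u64 num_bytes;
		atomic_t refs;
		struct mutex mutex;
		/* ... */
	};

	/* Dropping the last reference frees the head from its slab cache. */
	static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head_after *head)
	{
		if (atomic_dec_and_test(&head->refs))
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
	}
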
diff --git a/patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch b/patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch
index cbbaaef660..470b7d57e9 100644
--- a/patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch
+++ b/patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch
@@ -31,7 +31,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
-@@ -3080,7 +3080,11 @@ static noinline int check_delayed_ref(st
+@@ -3062,7 +3062,11 @@ static noinline int check_delayed_ref(st
struct rb_node *node;
int ret = 0;
@@ -43,7 +43,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
if (!cur_trans)
return 0;
-@@ -3089,6 +3093,7 @@ static noinline int check_delayed_ref(st
+@@ -3071,6 +3075,7 @@ static noinline int check_delayed_ref(st
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (!head) {
spin_unlock(&delayed_refs->lock);
@@ -51,15 +51,15 @@ Signed-off-by: David Sterba <dsterba@suse.com>
return 0;
}
-@@ -3105,6 +3110,7 @@ static noinline int check_delayed_ref(st
+@@ -3087,6 +3092,7 @@ static noinline int check_delayed_ref(st
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
+ btrfs_put_transaction(cur_trans);
return -EAGAIN;
}
spin_unlock(&delayed_refs->lock);
-@@ -3137,6 +3143,7 @@ static noinline int check_delayed_ref(st
+@@ -3119,6 +3125,7 @@ static noinline int check_delayed_ref(st
}
spin_unlock(&head->lock);
mutex_unlock(&head->mutex);
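
The patch above fixes a race: check_delayed_ref() used to read fs_info->running_transaction without holding trans_lock, so the transaction could be freed underneath it. A hedged sketch of the pattern the new hunks establish (assuming the 4.4-based SLE12-SP3 kernel still uses an atomic_t use_count; mainline later switched to refcount_t):

	static int check_delayed_ref_sketch(struct btrfs_root *root, u64 bytenr)
	{
		struct btrfs_transaction *cur_trans;

		/* Take trans_lock before looking at the running transaction... */
		spin_lock(&root->fs_info->trans_lock);
		cur_trans = root->fs_info->running_transaction;
		if (cur_trans)
			atomic_inc(&cur_trans->use_count);	/* ...and pin it */
		spin_unlock(&root->fs_info->trans_lock);

		if (!cur_trans)
			return 0;	/* nothing running, no delayed refs to check */

		/* ... inspect cur_trans->delayed_refs under delayed_refs->lock ... */

		btrfs_put_transaction(cur_trans);	/* every exit path drops the pin */
		return 0;
	}
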
diff --git a/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch b/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch
index fb1fc47758..b32daa0e46 100644
--- a/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch
+++ b/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch
@@ -102,24 +102,25 @@ Fixes: f64d5ca86821 ("btrfs: delayed_ref: Add new function to record reserved sp
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
---
- fs/btrfs/delayed-ref.c | 40 ++++------------------------------------
+ fs/btrfs/delayed-ref.c | 42 ++++--------------------------------------
fs/btrfs/delayed-ref.h | 11 -----------
fs/btrfs/extent-tree.c | 3 ---
fs/btrfs/qgroup.c | 19 +++++++++++++++----
fs/btrfs/qgroup.h | 22 +++++++++++-----------
include/trace/events/btrfs.h | 29 -----------------------------
- 6 files changed, 30 insertions(+), 94 deletions(-)
+ 6 files changed, 30 insertions(+), 96 deletions(-)
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
-@@ -616,14 +616,12 @@ add_delayed_ref_head(struct btrfs_fs_inf
- INIT_LIST_HEAD(&head_ref->ref_add_list);
+@@ -590,15 +590,13 @@ static void init_delayed_ref_head(struct
+ RB_CLEAR_NODE(&head_ref->href_node);
head_ref->processing = 0;
head_ref->total_ref_mod = count_mod;
- head_ref->qgroup_reserved = 0;
- head_ref->qgroup_ref_root = 0;
+ spin_lock_init(&head_ref->lock);
+ mutex_init(&head_ref->mutex);
- /* Record qgroup extent info if provided */
if (qrecord) {
if (ref_root && reserved) {
- head_ref->qgroup_ref_root = ref_root;
@@ -129,16 +130,18 @@ Signed-off-by: David Sterba <dsterba@suse.com>
}
qrecord->bytenr = bytenr;
-@@ -643,8 +641,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -640,10 +638,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
if (existing) {
-- WARN_ON(ref_root && reserved && existing->qgroup_ref_root
+- WARN_ON(qrecord && head_ref->qgroup_ref_root
+- && head_ref->qgroup_reserved
+- && existing->qgroup_ref_root
- && existing->qgroup_reserved);
- update_existing_head_ref(delayed_refs, &existing->node, ref,
+ update_existing_head_ref(delayed_refs, existing, head_ref,
old_ref_mod);
/*
-@@ -808,7 +804,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -760,7 +754,7 @@ int btrfs_add_delayed_tree_ref(struct bt
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
is_fstree(ref_root)) {
@@ -147,7 +150,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
if (!record)
goto free_head_ref;
}
-@@ -869,7 +865,7 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -840,7 +834,7 @@ int btrfs_add_delayed_data_ref(struct bt
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
is_fstree(ref_root)) {
@@ -156,7 +159,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
if (!record) {
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
kmem_cache_free(btrfs_delayed_ref_head_cachep,
-@@ -899,34 +895,6 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -874,34 +868,6 @@ int btrfs_add_delayed_data_ref(struct bt
return 0;
}
@@ -193,8 +196,8 @@ Signed-off-by: David Sterba <dsterba@suse.com>
u64 bytenr, u64 num_bytes,
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
-@@ -120,17 +120,6 @@ struct btrfs_delayed_ref_head {
- int total_ref_mod;
+@@ -119,17 +119,6 @@ struct btrfs_delayed_ref_head {
+ int ref_mod;
/*
- * For qgroup reserved space freeing.
@@ -221,7 +224,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
- btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
- head->qgroup_reserved);
btrfs_delayed_ref_unlock(head);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
return 0;
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -308,7 +311,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
-@@ -1373,35 +1373,6 @@ DEFINE_EVENT(btrfs__qgroup_rsv_data, btr
+@@ -1370,35 +1370,6 @@ DEFINE_EVENT(btrfs__qgroup_rsv_data, btr
TP_ARGS(inode, start, len, reserved, op)
);
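
The qgroup patch above removes qgroup_ref_root/qgroup_reserved from the delayed ref head; the reservation travels with the qgroup extent record instead, so the release happens when the qgroup code processes the record rather than when the head is run. A sketch of the record after the move (field names follow the mainline commit this backports and should be treated as illustrative):

	struct btrfs_qgroup_extent_record_sketch {
		struct rb_node node;
		u64 bytenr;
		u64 num_bytes;

		/* Moved here from btrfs_delayed_ref_head: */
		u32 data_rsv;		/* reserved data bytes to release */
		u64 data_rsv_refroot;	/* root the reservation was charged to */

		struct ulist *old_roots;
	};
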
diff --git a/patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch b/patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch
new file mode 100644
index 0000000000..0bdd15ef30
--- /dev/null
+++ b/patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch
@@ -0,0 +1,85 @@
+From cb49a87b2a4edb469e4d295eca4b1d106f64083e Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:17 +0300
+Git-commit: cb49a87b2a4edb469e4d295eca4b1d106f64083e
+Patch-mainline: v4.18-rc1
+References: bsc#1134813
+Subject: [PATCH 1/8] btrfs: Factor out common delayed refs init code
+
+The majority of the init code for struct btrfs_delayed_ref_node is
+duplicated in add_delayed_data_ref and add_delayed_tree_ref. Factor out
+the common bits in init_delayed_ref_common. This function is going to be
+used in future patches to clean that up. No functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 51 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 51 insertions(+)
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 4fb041e14742..a0dc255792c7 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -644,6 +644,57 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ return head_ref;
+ }
+
++/*
++ * init_delayed_ref_common - Initialize the structure which represents a
+ * modification to an extent.
++ *
++ * @fs_info: Internal to the mounted filesystem mount structure.
++ *
++ * @ref: The structure which is going to be initialized.
++ *
++ * @bytenr: The logical address of the extent for which a modification is
++ * going to be recorded.
++ *
++ * @num_bytes: Size of the extent whose modification is being recorded.
++ *
++ * @ref_root: The id of the root where this modification has originated, this
++ * can be either one of the well-known metadata trees or the
++ * subvolume id which references this extent.
++ *
++ * @action: Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
++ * BTRFS_ADD_DELAYED_EXTENT
++ *
++ * @ref_type: Holds the type of the extent which is being recorded, can be
++ * one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
++ * when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
+ * BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
++ */
++static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
++ struct btrfs_delayed_ref_node *ref,
++ u64 bytenr, u64 num_bytes, u64 ref_root,
++ int action, u8 ref_type)
++{
++ u64 seq = 0;
++
++ if (action == BTRFS_ADD_DELAYED_EXTENT)
++ action = BTRFS_ADD_DELAYED_REF;
++
++ if (is_fstree(ref_root))
++ seq = atomic64_read(&fs_info->tree_mod_seq);
++
++ atomic_set(&ref->refs, 1);
++ ref->bytenr = bytenr;
++ ref->num_bytes = num_bytes;
++ ref->ref_mod = 1;
++ ref->action = action;
++ ref->is_head = 0;
++ ref->in_tree = 1;
++ ref->seq = seq;
++ ref->type = ref_type;
++ RB_CLEAR_NODE(&ref->ref_node);
++ INIT_LIST_HEAD(&ref->add_list);
++}
++
+ /*
+ * helper to insert a delayed tree ref into the rbtree.
+ */
+--
+2.21.0
+
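
For reference, a condensed sketch of how a caller uses the new helper; this mirrors what patches 2/8 through 5/8 below actually do for tree and data refs:

	/* Tree ref: pick the ref type, run the common init, then fill in
	 * the type-specific members. */
	u8 ref_type = parent ? BTRFS_SHARED_BLOCK_REF_KEY
			     : BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->level = level;
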
diff --git a/patches.suse/0001-btrfs-remove-delayed_ref_node-from-ref_head.patch b/patches.suse/0001-btrfs-remove-delayed_ref_node-from-ref_head.patch
new file mode 100644
index 0000000000..a5ce12b646
--- /dev/null
+++ b/patches.suse/0001-btrfs-remove-delayed_ref_node-from-ref_head.patch
@@ -0,0 +1,723 @@
+From d278850eff3053ef166cf64c16f798dfe36278a2 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Git-commit: d278850eff3053ef166cf64c16f798dfe36278a2
+Patch-mainline: v4.15
+References: bsc#1134813
+Date: Fri, 29 Sep 2017 15:43:57 -0400
+Subject: [PATCH 1/2] btrfs: remove delayed_ref_node from ref_head
+
+This is just excessive information in the ref_head, and makes the code
+complicated. It is a relic from when we had the heads and the refs in
+the same tree, which is no longer the case. With this removal I've
+cleaned up a bunch of the cruft around this old assumption as well.
+
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/backref.c | 4 -
+ fs/btrfs/delayed-ref.c | 126 ++++++++++++++++++-------------------------
+ fs/btrfs/delayed-ref.h | 49 +++++-----------
+ fs/btrfs/extent-tree.c | 88 +++++++++++-------------------
+ include/trace/events/btrfs.h | 13 +---
+ 5 files changed, 112 insertions(+), 168 deletions(-)
+
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1160,7 +1160,7 @@ again:
+ head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ if (head) {
+ if (!mutex_trylock(&head->mutex)) {
+- atomic_inc(&head->node.refs);
++ atomic_inc(&head->refs);
+ spin_unlock(&delayed_refs->lock);
+
+ btrfs_release_path(path);
+@@ -1171,7 +1171,7 @@ again:
+ */
+ mutex_lock(&head->mutex);
+ mutex_unlock(&head->mutex);
+- btrfs_put_delayed_ref(&head->node);
++ btrfs_put_delayed_ref_head(head);
+ goto again;
+ }
+ spin_unlock(&delayed_refs->lock);
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -96,15 +96,15 @@ static struct btrfs_delayed_ref_head *ht
+ u64 bytenr;
+
+ ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
+- bytenr = ins->node.bytenr;
++ bytenr = ins->bytenr;
+ while (*p) {
+ parent_node = *p;
+ entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
+ href_node);
+
+- if (bytenr < entry->node.bytenr)
++ if (bytenr < entry->bytenr)
+ p = &(*p)->rb_left;
+- else if (bytenr > entry->node.bytenr)
++ else if (bytenr > entry->bytenr)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+@@ -133,15 +133,15 @@ find_ref_head(struct rb_root *root, u64
+ while (n) {
+ entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
+
+- if (bytenr < entry->node.bytenr)
++ if (bytenr < entry->bytenr)
+ n = n->rb_left;
+- else if (bytenr > entry->node.bytenr)
++ else if (bytenr > entry->bytenr)
+ n = n->rb_right;
+ else
+ return entry;
+ }
+ if (entry && return_bigger) {
+- if (bytenr > entry->node.bytenr) {
++ if (bytenr > entry->bytenr) {
+ n = rb_next(&entry->href_node);
+ if (!n)
+ n = rb_first(root);
+@@ -164,17 +164,17 @@ int btrfs_delayed_ref_lock(struct btrfs_
+ if (mutex_trylock(&head->mutex))
+ return 0;
+
+- atomic_inc(&head->node.refs);
++ atomic_inc(&head->refs);
+ spin_unlock(&delayed_refs->lock);
+
+ mutex_lock(&head->mutex);
+ spin_lock(&delayed_refs->lock);
+- if (!head->node.in_tree) {
++ if (RB_EMPTY_NODE(&head->href_node)) {
+ mutex_unlock(&head->mutex);
+- btrfs_put_delayed_ref(&head->node);
++ btrfs_put_delayed_ref_head(head);
+ return -EAGAIN;
+ }
+- btrfs_put_delayed_ref(&head->node);
++ btrfs_put_delayed_ref_head(head);
+ return 0;
+ }
+
+@@ -183,15 +183,10 @@ static inline void drop_delayed_ref(stru
+ struct btrfs_delayed_ref_head *head,
+ struct btrfs_delayed_ref_node *ref)
+ {
+- if (btrfs_delayed_ref_is_head(ref)) {
+- head = btrfs_delayed_node_to_head(ref);
+- rb_erase(&head->href_node, &delayed_refs->href_root);
+- } else {
+- assert_spin_locked(&head->lock);
+- list_del(&ref->list);
+- if (!list_empty(&ref->add_list))
+- list_del(&ref->add_list);
+- }
++ assert_spin_locked(&head->lock);
++ list_del(&ref->list);
++ if (!list_empty(&ref->add_list))
++ list_del(&ref->add_list);
+ ref->in_tree = 0;
+ btrfs_put_delayed_ref(ref);
+ atomic_dec(&delayed_refs->num_entries);
+@@ -380,8 +375,8 @@ again:
+ head->processing = 1;
+ WARN_ON(delayed_refs->num_heads_ready == 0);
+ delayed_refs->num_heads_ready--;
+- delayed_refs->run_delayed_start = head->node.bytenr +
+- head->node.num_bytes;
++ delayed_refs->run_delayed_start = head->bytenr +
++ head->num_bytes;
+ return head;
+ }
+
+@@ -469,20 +464,16 @@ add_tail:
+ */
+ static noinline void
+ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
+- struct btrfs_delayed_ref_node *existing,
+- struct btrfs_delayed_ref_node *update,
++ struct btrfs_delayed_ref_head *existing,
++ struct btrfs_delayed_ref_head *update,
+ int *old_ref_mod_ret)
+ {
+- struct btrfs_delayed_ref_head *existing_ref;
+- struct btrfs_delayed_ref_head *ref;
+ int old_ref_mod;
+
+- existing_ref = btrfs_delayed_node_to_head(existing);
+- ref = btrfs_delayed_node_to_head(update);
+- BUG_ON(existing_ref->is_data != ref->is_data);
++ BUG_ON(existing->is_data != update->is_data);
+
+- spin_lock(&existing_ref->lock);
+- if (ref->must_insert_reserved) {
++ spin_lock(&existing->lock);
++ if (update->must_insert_reserved) {
+ /* if the extent was freed and then
+ * reallocated before the delayed ref
+ * entries were processed, we can end up
+@@ -490,7 +481,7 @@ update_existing_head_ref(struct btrfs_de
+ * the must_insert_reserved flag set.
+ * Set it again here
+ */
+- existing_ref->must_insert_reserved = ref->must_insert_reserved;
++ existing->must_insert_reserved = update->must_insert_reserved;
+
+ /*
+ * update the num_bytes so we make sure the accounting
+@@ -500,22 +491,22 @@ update_existing_head_ref(struct btrfs_de
+
+ }
+
+- if (ref->extent_op) {
+- if (!existing_ref->extent_op) {
+- existing_ref->extent_op = ref->extent_op;
++ if (update->extent_op) {
++ if (!existing->extent_op) {
++ existing->extent_op = update->extent_op;
+ } else {
+- if (ref->extent_op->update_key) {
+- memcpy(&existing_ref->extent_op->key,
+- &ref->extent_op->key,
+- sizeof(ref->extent_op->key));
+- existing_ref->extent_op->update_key = true;
++ if (update->extent_op->update_key) {
++ memcpy(&existing->extent_op->key,
++ &update->extent_op->key,
++ sizeof(update->extent_op->key));
++ existing->extent_op->update_key = true;
+ }
+- if (ref->extent_op->update_flags) {
+- existing_ref->extent_op->flags_to_set |=
+- ref->extent_op->flags_to_set;
+- existing_ref->extent_op->update_flags = true;
++ if (update->extent_op->update_flags) {
++ existing->extent_op->flags_to_set |=
++ update->extent_op->flags_to_set;
++ existing->extent_op->update_flags = true;
+ }
+- btrfs_free_delayed_extent_op(ref->extent_op);
++ btrfs_free_delayed_extent_op(update->extent_op);
+ }
+ }
+ /*
+@@ -523,23 +514,23 @@ update_existing_head_ref(struct btrfs_de
+ * only need the lock for this case cause we could be processing it
+ * currently, for refs we just added we know we're a-ok.
+ */
+- old_ref_mod = existing_ref->total_ref_mod;
++ old_ref_mod = existing->total_ref_mod;
+ if (old_ref_mod_ret)
+ *old_ref_mod_ret = old_ref_mod;
+ existing->ref_mod += update->ref_mod;
+- existing_ref->total_ref_mod += update->ref_mod;
++ existing->total_ref_mod += update->ref_mod;
+
+ /*
+ * If we are going to from a positive ref mod to a negative or vice
+ * versa we need to make sure to adjust pending_csums accordingly.
+ */
+- if (existing_ref->is_data) {
+- if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
++ if (existing->is_data) {
++ if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
+ delayed_refs->pending_csums -= existing->num_bytes;
+- if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
++ if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
+ delayed_refs->pending_csums += existing->num_bytes;
+ }
+- spin_unlock(&existing_ref->lock);
++ spin_unlock(&existing->lock);
+ }
+
+ /*
+@@ -550,14 +541,13 @@ update_existing_head_ref(struct btrfs_de
+ static noinline struct btrfs_delayed_ref_head *
+ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+- struct btrfs_delayed_ref_node *ref,
++ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_qgroup_extent_record *qrecord,
+ u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
+ int action, int is_data, int *old_ref_mod,
+ int *new_ref_mod)
+ {
+ struct btrfs_delayed_ref_head *existing;
+- struct btrfs_delayed_ref_head *head_ref = NULL;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ int count_mod = 1;
+ int must_insert_reserved = 0;
+@@ -592,26 +582,21 @@ add_delayed_ref_head(struct btrfs_fs_inf
+
+ delayed_refs = &trans->transaction->delayed_refs;
+
+- /* first set the basic ref node struct up */
+- atomic_set(&ref->refs, 1);
+- ref->bytenr = bytenr;
+- ref->num_bytes = num_bytes;
+- ref->ref_mod = count_mod;
+- ref->type = 0;
+- ref->action = 0;
+- ref->is_head = 1;
+- ref->in_tree = 1;
+- ref->seq = 0;
+-
+- head_ref = btrfs_delayed_node_to_head(ref);
++ atomic_set(&head_ref->refs, 1);
++ head_ref->bytenr = bytenr;
++ head_ref->num_bytes = num_bytes;
++ head_ref->ref_mod = count_mod;
+ head_ref->must_insert_reserved = must_insert_reserved;
+ head_ref->is_data = is_data;
+ INIT_LIST_HEAD(&head_ref->ref_list);
+ INIT_LIST_HEAD(&head_ref->ref_add_list);
++ RB_CLEAR_NODE(&head_ref->href_node);
+ head_ref->processing = 0;
+ head_ref->total_ref_mod = count_mod;
+ head_ref->qgroup_reserved = 0;
+ head_ref->qgroup_ref_root = 0;
++ spin_lock_init(&head_ref->lock);
++ mutex_init(&head_ref->mutex);
+
+ /* Record qgroup extent info if provided */
+ if (qrecord) {
+@@ -629,17 +614,14 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ kfree(qrecord);
+ }
+
+- spin_lock_init(&head_ref->lock);
+- mutex_init(&head_ref->mutex);
+-
+- trace_add_delayed_ref_head(fs_info, ref, head_ref, action);
++ trace_add_delayed_ref_head(fs_info, head_ref, action);
+
+ existing = htree_insert(&delayed_refs->href_root,
+ &head_ref->href_node);
+ if (existing) {
+ WARN_ON(ref_root && reserved && existing->qgroup_ref_root
+ && existing->qgroup_reserved);
+- update_existing_head_ref(delayed_refs, &existing->node, ref,
++ update_existing_head_ref(delayed_refs, existing, head_ref,
+ old_ref_mod);
+ /*
+ * we've updated the existing ref, free the newly
+@@ -815,7 +797,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ * insert both the head node and the new ref without dropping
+ * the spin lock
+ */
+- head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
++ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+ bytenr, num_bytes, 0, 0, action, 0,
+ old_ref_mod, new_ref_mod);
+
+@@ -880,7 +862,7 @@ int btrfs_add_delayed_data_ref(struct bt
+ * insert both the head node and the new ref without dropping
+ * the spin lock
+ */
+- head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
++ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+ bytenr, num_bytes, ref_root, reserved,
+ action, 1, old_ref_mod, new_ref_mod);
+
+@@ -937,7 +919,7 @@ int btrfs_add_delayed_extent_op(struct b
+ delayed_refs = &trans->transaction->delayed_refs;
+ spin_lock(&delayed_refs->lock);
+
+- add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
++ add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
+ num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
+ extent_op->is_data, NULL, NULL);
+
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -24,15 +24,6 @@
+ #define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
+ #define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
+
+-/*
+- * XXX: Qu: I really hate the design that ref_head and tree/data ref shares the
+- * same ref_node structure.
+- * Ref_head is in a higher logic level than tree/data ref, and duplicated
+- * bytenr/num_bytes in ref_node is really a waste or memory, they should be
+- * referred from ref_head.
+- * This gets more disgusting after we use list to store tree/data ref in
+- * ref_head. Must clean this mess up later.
+- */
+ struct btrfs_delayed_ref_node {
+ /*
+ * ref_head use rb tree, stored in ref_root->href.
+@@ -95,8 +86,9 @@ struct btrfs_delayed_extent_op {
+ * reference count modifications we've queued up.
+ */
+ struct btrfs_delayed_ref_head {
+- struct btrfs_delayed_ref_node node;
+-
++ u64 bytenr;
++ u64 num_bytes;
++ atomic_t refs;
+ /*
+ * the mutex is held while running the refs, and it is also
+ * held when checking the sum of reference modifications.
+@@ -120,6 +112,14 @@ struct btrfs_delayed_ref_head {
+ int total_ref_mod;
+
+ /*
++ * This is the current outstanding mod references for this bytenr. This
++ * is used with lookup_extent_info to get an accurate reference count
++ * for a bytenr, so it is adjusted as delayed refs are run so that any
++ * on disk reference count + ref_mod is accurate.
++ */
++ int ref_mod;
++
++ /*
+ * For qgroup reserved space freeing.
+ *
+ * ref_root and reserved will be recorded after
+@@ -238,15 +238,18 @@ static inline void btrfs_put_delayed_ref
+ case BTRFS_SHARED_DATA_REF_KEY:
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+ break;
+- case 0:
+- kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
+- break;
+ default:
+ BUG();
+ }
+ }
+ }
+
++static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
++{
++ if (atomic_dec_and_test(&head->refs))
++ kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
++}
++
+ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes, u64 parent,
+@@ -291,35 +294,17 @@ int btrfs_check_delayed_seq(struct btrfs
+ u64 seq);
+
+ /*
+- * a node might live in a head or a regular ref, this lets you
+- * test for the proper type to use.
+- */
+-static int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
+-{
+- return node->is_head;
+-}
+-
+-/*
+ * helper functions to cast a node into its container
+ */
+ static inline struct btrfs_delayed_tree_ref *
+ btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
+ {
+- WARN_ON(btrfs_delayed_ref_is_head(node));
+ return container_of(node, struct btrfs_delayed_tree_ref, node);
+ }
+
+ static inline struct btrfs_delayed_data_ref *
+ btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
+ {
+- WARN_ON(btrfs_delayed_ref_is_head(node));
+ return container_of(node, struct btrfs_delayed_data_ref, node);
+ }
+-
+-static inline struct btrfs_delayed_ref_head *
+-btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
+-{
+- WARN_ON(!btrfs_delayed_ref_is_head(node));
+- return container_of(node, struct btrfs_delayed_ref_head, node);
+-}
+ #endif
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -912,7 +912,7 @@ search_again:
+ head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ if (head) {
+ if (!mutex_trylock(&head->mutex)) {
+- atomic_inc(&head->node.refs);
++ atomic_inc(&head->refs);
+ spin_unlock(&delayed_refs->lock);
+
+ btrfs_release_path(path);
+@@ -923,7 +923,7 @@ search_again:
+ */
+ mutex_lock(&head->mutex);
+ mutex_unlock(&head->mutex);
+- btrfs_put_delayed_ref(&head->node);
++ btrfs_put_delayed_ref_head(head);
+ goto search_again;
+ }
+ spin_lock(&head->lock);
+@@ -932,7 +932,7 @@ search_again:
+ else
+ BUG_ON(num_refs == 0);
+
+- num_refs += head->node.ref_mod;
++ num_refs += head->ref_mod;
+ spin_unlock(&head->lock);
+ mutex_unlock(&head->mutex);
+ }
+@@ -2249,7 +2249,7 @@ static void __run_delayed_extent_op(stru
+
+ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+- struct btrfs_delayed_ref_node *node,
++ struct btrfs_delayed_ref_head *head,
+ struct btrfs_delayed_extent_op *extent_op)
+ {
+ struct btrfs_key key;
+@@ -2271,14 +2271,14 @@ static int run_delayed_extent_op(struct
+ if (!path)
+ return -ENOMEM;
+
+- key.objectid = node->bytenr;
++ key.objectid = head->bytenr;
+
+ if (metadata) {
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ key.offset = extent_op->level;
+ } else {
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+- key.offset = node->num_bytes;
++ key.offset = head->num_bytes;
+ }
+
+ again:
+@@ -2296,17 +2296,17 @@ again:
+ path->slots[0]--;
+ btrfs_item_key_to_cpu(path->nodes[0], &key,
+ path->slots[0]);
+- if (key.objectid == node->bytenr &&
++ if (key.objectid == head->bytenr &&
+ key.type == BTRFS_EXTENT_ITEM_KEY &&
+- key.offset == node->num_bytes)
++ key.offset == head->num_bytes)
+ ret = 0;
+ }
+ if (ret > 0) {
+ btrfs_release_path(path);
+ metadata = 0;
+
+- key.objectid = node->bytenr;
+- key.offset = node->num_bytes;
++ key.objectid = head->bytenr;
++ key.offset = head->num_bytes;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ goto again;
+ }
+@@ -2477,7 +2477,7 @@ static int cleanup_extent_op(struct btrf
+ return 0;
+ }
+ spin_unlock(&head->lock);
+- ret = run_delayed_extent_op(trans, fs_info->extent_root, &head->node,
++ ret = run_delayed_extent_op(trans, fs_info->extent_root, head,
+ extent_op);
+ btrfs_free_delayed_extent_op(extent_op);
+ return ret ? ret : 1;
+@@ -2513,39 +2513,38 @@ static int cleanup_ref_head(struct btrfs
+ spin_unlock(&delayed_refs->lock);
+ return 1;
+ }
+- head->node.in_tree = 0;
+ delayed_refs->num_heads--;
+ rb_erase(&head->href_node, &delayed_refs->href_root);
++ RB_CLEAR_NODE(&head->href_node);
+ spin_unlock(&delayed_refs->lock);
+ spin_unlock(&head->lock);
+ atomic_dec(&delayed_refs->num_entries);
+
+- trace_run_delayed_ref_head(fs_info, &head->node, head,
+- head->node.action);
++ trace_run_delayed_ref_head(fs_info, head, 0);
+
+ if (head->total_ref_mod < 0) {
+ struct btrfs_block_group_cache *cache;
+
+- cache = btrfs_lookup_block_group(fs_info, head->node.bytenr);
++ cache = btrfs_lookup_block_group(fs_info, head->bytenr);
+ ASSERT(cache);
+ percpu_counter_add(&cache->space_info->total_bytes_pinned,
+- -head->node.num_bytes);
++ -head->num_bytes);
+ btrfs_put_block_group(cache);
+
+ if (head->is_data) {
+ spin_lock(&delayed_refs->lock);
+- delayed_refs->pending_csums -= head->node.num_bytes;
++ delayed_refs->pending_csums -= head->num_bytes;
+ spin_unlock(&delayed_refs->lock);
+ }
+ }
+
+ if (head->must_insert_reserved) {
+- btrfs_pin_extent(fs_info->extent_root, head->node.bytenr,
+- head->node.num_bytes, 1);
++ btrfs_pin_extent(fs_info->extent_root, head->bytenr,
++ head->num_bytes, 1);
+ if (head->is_data) {
+ ret = btrfs_del_csums(trans, fs_info->csum_root,
+- head->node.bytenr,
+- head->node.num_bytes);
++ head->bytenr,
++ head->num_bytes);
+ }
+ }
+
+@@ -2553,7 +2552,7 @@ static int cleanup_ref_head(struct btrfs
+ btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
+ head->qgroup_reserved);
+ btrfs_delayed_ref_unlock(head);
+- btrfs_put_delayed_ref(&head->node);
++ btrfs_put_delayed_ref_head(head);
+ return 0;
+ }
+
+@@ -2668,10 +2667,10 @@ static noinline int __btrfs_run_delayed_
+ switch (ref->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ case BTRFS_ADD_DELAYED_EXTENT:
+- locked_ref->node.ref_mod -= ref->ref_mod;
++ locked_ref->ref_mod -= ref->ref_mod;
+ break;
+ case BTRFS_DROP_DELAYED_REF:
+- locked_ref->node.ref_mod += ref->ref_mod;
++ locked_ref->ref_mod += ref->ref_mod;
+ break;
+ default:
+ WARN_ON(1);
+@@ -3001,32 +3000,14 @@ again:
+ }
+ count = (unsigned long)-1;
+
+- while (node) {
+- head = rb_entry(node, struct btrfs_delayed_ref_head,
+- href_node);
+- if (btrfs_delayed_ref_is_head(&head->node)) {
+- struct btrfs_delayed_ref_node *ref;
+-
+- ref = &head->node;
+- atomic_inc(&ref->refs);
+-
+- spin_unlock(&delayed_refs->lock);
+- /*
+- * Mutex was contended, block until it's
+- * released and try again
+- */
+- mutex_lock(&head->mutex);
+- mutex_unlock(&head->mutex);
+-
+- btrfs_put_delayed_ref(ref);
+- cond_resched();
+- goto again;
+- } else {
+- WARN_ON(1);
+- }
+- node = rb_next(node);
+- }
++ head = rb_entry(node, struct btrfs_delayed_ref_head,
++ href_node);
++ atomic_inc(&head->refs);
+ spin_unlock(&delayed_refs->lock);
++
++ /* Mutex was contended, block until it's released and retry. */
++ mutex_lock(&head->mutex);
++ mutex_unlock(&head->mutex);
+ cond_resched();
+ goto again;
+ }
+@@ -3085,7 +3066,7 @@ static noinline int check_delayed_ref(st
+ }
+
+ if (!mutex_trylock(&head->mutex)) {
+- atomic_inc(&head->node.refs);
++ atomic_inc(&head->refs);
+ spin_unlock(&delayed_refs->lock);
+
+ btrfs_release_path(path);
+@@ -3096,7 +3077,7 @@ static noinline int check_delayed_ref(st
+ */
+ mutex_lock(&head->mutex);
+ mutex_unlock(&head->mutex);
+- btrfs_put_delayed_ref(&head->node);
++ btrfs_put_delayed_ref_head(head);
+ return -EAGAIN;
+ }
+ spin_unlock(&delayed_refs->lock);
+@@ -7160,9 +7141,8 @@ static noinline int check_ref_cleanup(st
+ * at this point we have a head with no other entries. Go
+ * ahead and process it.
+ */
+- head->node.in_tree = 0;
+ rb_erase(&head->href_node, &delayed_refs->href_root);
+-
++ RB_CLEAR_NODE(&head->href_node);
+ atomic_dec(&delayed_refs->num_entries);
+
+ /*
+@@ -7181,7 +7161,7 @@ static noinline int check_ref_cleanup(st
+ ret = 1;
+
+ mutex_unlock(&head->mutex);
+- btrfs_put_delayed_ref(&head->node);
++ btrfs_put_delayed_ref_head(head);
+ return ret;
+ out:
+ spin_unlock(&head->lock);
+--- a/include/trace/events/btrfs.h
++++ b/include/trace/events/btrfs.h
+@@ -643,11 +643,10 @@ DEFINE_EVENT(btrfs_delayed_data_ref, ru
+ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+- const struct btrfs_delayed_ref_node *ref,
+ const struct btrfs_delayed_ref_head *head_ref,
+ int action),
+
+- TP_ARGS(fs_info, ref, head_ref, action),
++ TP_ARGS(fs_info, head_ref, action),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, bytenr )
+@@ -657,8 +656,8 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_he
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+- __entry->bytenr = ref->bytenr;
+- __entry->num_bytes = ref->num_bytes;
++ __entry->bytenr = head_ref->bytenr;
++ __entry->num_bytes = head_ref->num_bytes;
+ __entry->action = action;
+ __entry->is_data = head_ref->is_data;
+ ),
+@@ -673,21 +672,19 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_he
+ DEFINE_EVENT(btrfs_delayed_ref_head, add_delayed_ref_head,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+- const struct btrfs_delayed_ref_node *ref,
+ const struct btrfs_delayed_ref_head *head_ref,
+ int action),
+
+- TP_ARGS(fs_info, ref, head_ref, action)
++ TP_ARGS(fs_info, head_ref, action)
+ );
+
+ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+- const struct btrfs_delayed_ref_node *ref,
+ const struct btrfs_delayed_ref_head *head_ref,
+ int action),
+
+- TP_ARGS(fs_info, ref, head_ref, action)
++ TP_ARGS(fs_info, head_ref, action)
+ );
+
+ #define show_chunk_type(type) \
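
One idiom recurs in almost every hunk of this patch: when a head's mutex is contended, pin the head via its (now head-local) refcount, drop the spinlock, wait for the mutex holder to finish, then retry the whole lookup. A condensed sketch of the loop used in backref.c, extent-tree.c and disk-io.c above:

	again:
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head && !mutex_trylock(&head->mutex)) {
			atomic_inc(&head->refs);	/* keep head alive across the wait */
			spin_unlock(&delayed_refs->lock);

			mutex_lock(&head->mutex);	/* block until the holder is done */
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto again;			/* state may have changed: redo */
		}
		/* here: head is NULL, or we hold head->mutex */
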
diff --git a/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch b/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
index 50e17fddf4..488c238f91 100644
--- a/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
+++ b/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
@@ -25,9 +25,9 @@ Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/backref.c | 5 +-
fs/btrfs/delayed-ref.c | 108 +++++++++++++++++++++++++------------------------
fs/btrfs/delayed-ref.h | 5 --
- fs/btrfs/disk-io.c | 10 ++--
+ fs/btrfs/disk-io.c | 21 ++++-----
fs/btrfs/extent-tree.c | 21 ++++++---
- 5 files changed, 82 insertions(+), 67 deletions(-)
+ 5 files changed, 87 insertions(+), 73 deletions(-)
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -87,17 +87,17 @@ Signed-off-by: David Sterba <dsterba@suse.com>
/*
* find an head entry based on bytenr. This returns the delayed ref
* head if it was able to find one, or NULL if nothing was in that spot.
-@@ -216,7 +244,8 @@ static inline void drop_delayed_ref(stru
- rb_erase(&head->href_node, &delayed_refs->href_root);
- } else {
- assert_spin_locked(&head->lock);
-- list_del(&ref->list);
-+ rb_erase(&ref->ref_node, &head->ref_tree);
-+ RB_CLEAR_NODE(&ref->ref_node);
- if (!list_empty(&ref->add_list))
- list_del(&ref->add_list);
- }
-@@ -234,24 +263,18 @@ static bool merge_ref(struct btrfs_trans
+@@ -212,7 +240,8 @@ static inline void drop_delayed_ref(stru
+ struct btrfs_delayed_ref_node *ref)
+ {
+ assert_spin_locked(&head->lock);
+- list_del(&ref->list);
++ rb_erase(&ref->ref_node, &head->ref_tree);
++ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ ref->in_tree = 0;
+@@ -229,24 +258,18 @@ static bool merge_ref(struct btrfs_trans
u64 seq)
{
struct btrfs_delayed_ref_node *next;
@@ -128,7 +128,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
if (ref->action == next->action) {
mod = next->ref_mod;
-@@ -275,8 +298,6 @@ static bool merge_ref(struct btrfs_trans
+@@ -270,8 +293,6 @@ static bool merge_ref(struct btrfs_trans
WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
}
@@ -137,7 +137,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
}
return done;
-@@ -288,11 +309,12 @@ void btrfs_merge_delayed_refs(struct btr
+@@ -283,11 +304,12 @@ void btrfs_merge_delayed_refs(struct btr
struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_ref_node *ref;
@@ -151,7 +151,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
return;
/* We don't have too many refs to merge for data. */
-@@ -309,22 +331,13 @@ void btrfs_merge_delayed_refs(struct btr
+@@ -304,22 +326,13 @@ void btrfs_merge_delayed_refs(struct btr
}
spin_unlock(&fs_info->tree_mod_seq_lock);
@@ -179,7 +179,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
}
}
-@@ -407,25 +420,19 @@ again:
+@@ -402,25 +415,19 @@ again:
* Return 0 for insert.
* Return >0 for merge.
*/
@@ -212,7 +212,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
/* Now we are sure we can merge */
ret = 1;
-@@ -456,9 +463,7 @@ add_delayed_ref_tail_merge(struct btrfs_
+@@ -451,9 +458,7 @@ add_delayed_ref_tail_merge(struct btrfs_
drop_delayed_ref(trans, root, href, exist);
spin_unlock(&href->lock);
return ret;
@@ -223,16 +223,16 @@ Signed-off-by: David Sterba <dsterba@suse.com>
if (ref->action == BTRFS_ADD_DELAYED_REF)
list_add_tail(&ref->add_list, &href->ref_add_list);
atomic_inc(&root->num_entries);
-@@ -610,7 +615,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
- head_ref = btrfs_delayed_node_to_head(ref);
+@@ -592,7 +597,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ head_ref->ref_mod = count_mod;
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
- INIT_LIST_HEAD(&head_ref->ref_list);
+ head_ref->ref_tree = RB_ROOT;
INIT_LIST_HEAD(&head_ref->ref_add_list);
+ RB_CLEAR_NODE(&head_ref->href_node);
head_ref->processing = 0;
- head_ref->total_ref_mod = count_mod;
-@@ -698,7 +703,7 @@ add_delayed_tree_ref(struct btrfs_fs_inf
+@@ -680,7 +685,7 @@ add_delayed_tree_ref(struct btrfs_fs_inf
ref->is_head = 0;
ref->in_tree = 1;
ref->seq = seq;
@@ -241,7 +241,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
INIT_LIST_HEAD(&ref->add_list);
full_ref = btrfs_delayed_node_to_tree_ref(ref);
-@@ -712,7 +717,7 @@ add_delayed_tree_ref(struct btrfs_fs_inf
+@@ -694,7 +699,7 @@ add_delayed_tree_ref(struct btrfs_fs_inf
trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
@@ -250,7 +250,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
/*
* XXX: memory should be freed at the same level allocated.
-@@ -755,7 +760,7 @@ add_delayed_data_ref(struct btrfs_fs_inf
+@@ -737,7 +742,7 @@ add_delayed_data_ref(struct btrfs_fs_inf
ref->is_head = 0;
ref->in_tree = 1;
ref->seq = seq;
@@ -259,7 +259,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
INIT_LIST_HEAD(&ref->add_list);
full_ref = btrfs_delayed_node_to_data_ref(ref);
-@@ -771,8 +776,7 @@ add_delayed_data_ref(struct btrfs_fs_inf
+@@ -753,8 +758,7 @@ add_delayed_data_ref(struct btrfs_fs_inf
trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
@@ -271,7 +271,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
}
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
-@@ -40,8 +40,7 @@ struct btrfs_delayed_ref_node {
+@@ -31,8 +31,7 @@ struct btrfs_delayed_ref_node {
*/
struct rb_node rb_node;
@@ -281,7 +281,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
/*
* If action is BTRFS_ADD_DELAYED_REF, also link this node to
* ref_head->ref_add_list, then we do not need to iterate the
-@@ -104,7 +103,7 @@ struct btrfs_delayed_ref_head {
+@@ -96,7 +95,7 @@ struct btrfs_delayed_ref_head {
struct mutex mutex;
spinlock_t lock;
@@ -292,7 +292,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
-@@ -4162,7 +4162,7 @@ static int btrfs_destroy_delayed_refs(st
+@@ -4162,26 +4162,28 @@ static int btrfs_destroy_delayed_refs(st
while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
struct btrfs_delayed_ref_head *head;
@@ -301,7 +301,17 @@ Signed-off-by: David Sterba <dsterba@suse.com>
bool pin_bytes = false;
head = rb_entry(node, struct btrfs_delayed_ref_head,
-@@ -4178,10 +4178,12 @@ static int btrfs_destroy_delayed_refs(st
+ href_node);
+ if (!mutex_trylock(&head->mutex)) {
+- atomic_inc(&head->node.refs);
++ atomic_inc(&head->refs);
+ spin_unlock(&delayed_refs->lock);
+
+ mutex_lock(&head->mutex);
+ mutex_unlock(&head->mutex);
+- btrfs_put_delayed_ref(&head->node);
++ btrfs_put_delayed_ref_head(head);
+ spin_lock(&delayed_refs->lock);
continue;
}
spin_lock(&head->lock);
@@ -317,6 +327,26 @@ Signed-off-by: David Sterba <dsterba@suse.com>
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
atomic_dec(&delayed_refs->num_entries);
+@@ -4194,16 +4196,15 @@ static int btrfs_destroy_delayed_refs(st
+ if (head->processing == 0)
+ delayed_refs->num_heads_ready--;
+ atomic_dec(&delayed_refs->num_entries);
+- head->node.in_tree = 0;
+ rb_erase(&head->href_node, &delayed_refs->href_root);
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+ mutex_unlock(&head->mutex);
+
+ if (pin_bytes)
+- btrfs_pin_extent(root, head->node.bytenr,
+- head->node.num_bytes, 1);
+- btrfs_put_delayed_ref(&head->node);
++ btrfs_pin_extent(root, head->bytenr,
++ head->num_bytes, 1);
++ btrfs_put_delayed_ref_head(head);
+ cond_resched();
+ spin_lock(&delayed_refs->lock);
+ }
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2434,7 +2434,7 @@ select_delayed_ref(struct btrfs_delayed_
@@ -348,7 +378,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
return 1;
-@@ -2659,7 +2659,8 @@ static noinline int __btrfs_run_delayed_
+@@ -2658,7 +2658,8 @@ static noinline int __btrfs_run_delayed_
actual_count++;
ref->in_tree = 0;
@@ -358,7 +388,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
/*
-@@ -3071,6 +3072,7 @@ static noinline int check_delayed_ref(st
+@@ -3052,6 +3053,7 @@ static noinline int check_delayed_ref(st
struct btrfs_delayed_data_ref *data_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_transaction *cur_trans;
@@ -366,7 +396,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
int ret = 0;
cur_trans = root->fs_info->running_transaction;
-@@ -3103,7 +3105,12 @@ static noinline int check_delayed_ref(st
+@@ -3084,7 +3086,12 @@ static noinline int check_delayed_ref(st
spin_unlock(&delayed_refs->lock);
spin_lock(&head->lock);
@@ -380,7 +410,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
/* If it's a shared ref we know a cross reference exists */
if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
ret = 1;
-@@ -7078,7 +7085,7 @@ static noinline int check_ref_cleanup(st
+@@ -7059,7 +7066,7 @@ static noinline int check_ref_cleanup(st
goto out_delayed_unlock;
spin_lock(&head->lock);
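
With refs stored in head->ref_tree (an rb_root) rather than a list, iteration uses the standard kernel rbtree cursors and mergeable duplicates come out adjacent in sorted order. A brief sketch of the walk:

	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;

	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		/* refs are ordered, so merge candidates are neighbours */
	}
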
diff --git a/patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch b/patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch
new file mode 100644
index 0000000000..b41d8c75b2
--- /dev/null
+++ b/patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch
@@ -0,0 +1,69 @@
+From 646f4dd76fb3ac0d1e8677890522d4c044ee2f06 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:18 +0300
+Git-commit: 646f4dd76fb3ac0d1e8677890522d4c044ee2f06
+Patch-mainline: v4.18-rc1
+References: bsc#1134813
+Subject: [PATCH 2/8] btrfs: Use init_delayed_ref_common in
+ add_delayed_tree_ref
+
+Use the newly introduced common helper. No functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 33 ++++++++++-----------------------
+ 1 file changed, 10 insertions(+), 23 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -737,38 +737,25 @@ add_delayed_tree_ref(struct btrfs_fs_inf
+ {
+ struct btrfs_delayed_tree_ref *full_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+- u64 seq = 0;
++ u8 ref_type;
+ int ret;
+
+- if (action == BTRFS_ADD_DELAYED_EXTENT)
+- action = BTRFS_ADD_DELAYED_REF;
+-
+- if (is_fstree(ref_root))
+- seq = atomic64_read(&fs_info->tree_mod_seq);
+ delayed_refs = &trans->transaction->delayed_refs;
+
+- /* first set the basic ref node struct up */
+- atomic_set(&ref->refs, 1);
+- ref->bytenr = bytenr;
+- ref->num_bytes = num_bytes;
+- ref->ref_mod = 1;
+- ref->action = action;
+- ref->is_head = 0;
+- ref->in_tree = 1;
+- ref->seq = seq;
+- RB_CLEAR_NODE(&ref->ref_node);
+- INIT_LIST_HEAD(&ref->add_list);
+-
+ full_ref = btrfs_delayed_node_to_tree_ref(ref);
+- full_ref->parent = parent;
+- full_ref->root = ref_root;
+ if (parent)
+- ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
++ ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
+ else
+- ref->type = BTRFS_TREE_BLOCK_REF_KEY;
++ ref_type = BTRFS_TREE_BLOCK_REF_KEY;
++ init_delayed_ref_common(fs_info, ref, bytenr, num_bytes, ref_root,
++ action, ref_type);
++ full_ref->root = ref_root;
++ full_ref->parent = parent;
+ full_ref->level = level;
+
+- trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
++ trace_add_delayed_tree_ref(fs_info, ref, full_ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+
diff --git a/patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch b/patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch
new file mode 100644
index 0000000000..06d5afc447
--- /dev/null
+++ b/patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch
@@ -0,0 +1,73 @@
+From c812c8a857a00acae78341d5d4702eb8d7d02661 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:19 +0300
+Git-commit: c812c8a857a00acae78341d5d4702eb8d7d02661
+Patch-mainline: v4.18-rc1
+References: bsc#1134813
+Subject: [PATCH 3/8] btrfs: Use init_delayed_ref_common in
+ add_delayed_data_ref
+
+Use the newly introduced helper and remove the duplicate code. No
+functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 34 ++++++++++------------------------
+ 1 file changed, 10 insertions(+), 24 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -780,41 +780,27 @@ add_delayed_data_ref(struct btrfs_fs_inf
+ {
+ struct btrfs_delayed_data_ref *full_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+- u64 seq = 0;
++ u8 ref_type;
+ int ret;
+
+- if (action == BTRFS_ADD_DELAYED_EXTENT)
+- action = BTRFS_ADD_DELAYED_REF;
+-
+ delayed_refs = &trans->transaction->delayed_refs;
+
+- if (is_fstree(ref_root))
+- seq = atomic64_read(&fs_info->tree_mod_seq);
+-
+- /* first set the basic ref node struct up */
+- atomic_set(&ref->refs, 1);
+- ref->bytenr = bytenr;
+- ref->num_bytes = num_bytes;
+- ref->ref_mod = 1;
+- ref->action = action;
+- ref->is_head = 0;
+- ref->in_tree = 1;
+- ref->seq = seq;
+- RB_CLEAR_NODE(&ref->ref_node);
+- INIT_LIST_HEAD(&ref->add_list);
+-
+ full_ref = btrfs_delayed_node_to_data_ref(ref);
+- full_ref->parent = parent;
+- full_ref->root = ref_root;
+ if (parent)
+- ref->type = BTRFS_SHARED_DATA_REF_KEY;
++ ref_type = BTRFS_SHARED_DATA_REF_KEY;
+ else
+- ref->type = BTRFS_EXTENT_DATA_REF_KEY;
++ ref_type = BTRFS_EXTENT_DATA_REF_KEY;
+
++ init_delayed_ref_common(trans->fs_info, ref, bytenr, num_bytes,
++ ref_root, action, ref_type);
++ full_ref->root = ref_root;
++ full_ref->parent = parent;
+ full_ref->objectid = owner;
+ full_ref->offset = offset;
+
+- trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
++ trace_add_delayed_data_ref(trans->fs_info, ref, full_ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+ if (ret > 0)
diff --git a/patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch b/patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch
new file mode 100644
index 0000000000..f59ef218b8
--- /dev/null
+++ b/patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch
@@ -0,0 +1,127 @@
+From 70d640004ab5c2597084f6463dd39b36f4f026f8 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:20 +0300
+Git-commit: 70d640004ab5c2597084f6463dd39b36f4f026f8
+Patch-mainline: v4.18-rc1
+References: bsc#1134813
+Subject: [PATCH 4/8] btrfs: Open-code add_delayed_tree_ref
+
+Now that the initialization part and the critical section code have been
+split, it's a lot easier to open-code add_delayed_tree_ref. Do so in the
+following manner:
+
+1. The common init code is put immediately after memory-to-be-initialized
+ is allocated, followed by the ref-specific member initialization.
+
+2. The only piece of code that remains in the critical section is
+ insert_delayed_ref call.
+
+3. Tracing and memory freeing code is put outside of the critical
+ section as well.
+
+The only real change here is an overall shorter critical section when
+dealing with delayed tree refs. From a functional point of view, the code
+is unchanged.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 64 ++++++++++++++-----------------------------------
+ 1 file changed, 19 insertions(+), 45 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -725,49 +725,6 @@ static void init_delayed_ref_common(stru
+ }
+
+ /*
+- * helper to insert a delayed tree ref into the rbtree.
+- */
+-static noinline void
+-add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+- struct btrfs_trans_handle *trans,
+- struct btrfs_delayed_ref_head *head_ref,
+- struct btrfs_delayed_ref_node *ref, u64 bytenr,
+- u64 num_bytes, u64 parent, u64 ref_root, int level,
+- int action)
+-{
+- struct btrfs_delayed_tree_ref *full_ref;
+- struct btrfs_delayed_ref_root *delayed_refs;
+- u8 ref_type;
+- int ret;
+-
+- delayed_refs = &trans->transaction->delayed_refs;
+-
+- full_ref = btrfs_delayed_node_to_tree_ref(ref);
+- if (parent)
+- ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
+- else
+- ref_type = BTRFS_TREE_BLOCK_REF_KEY;
+- init_delayed_ref_common(fs_info, ref, bytenr, num_bytes, ref_root,
+- action, ref_type);
+- full_ref->root = ref_root;
+- full_ref->parent = parent;
+- full_ref->level = level;
+-
+- trace_add_delayed_tree_ref(fs_info, ref, full_ref,
+- action == BTRFS_ADD_DELAYED_EXTENT ?
+- BTRFS_ADD_DELAYED_REF : action);
+-
+- ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+-
+- /*
+- * XXX: memory should be freed at the same level allocated.
+- * But bad practice is anywhere... Follow it now. Need cleanup.
+- */
+- if (ret > 0)
+- kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
+-}
+-
+-/*
+ * helper to insert a delayed data ref into the rbtree.
+ */
+ static noinline void
+@@ -824,12 +781,24 @@ int btrfs_add_delayed_tree_ref(struct bt
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+ int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
++ int ret;
++ u8 ref_type;
+
+ BUG_ON(extent_op && extent_op->is_data);
+ ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+
++ if (parent)
++ ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
++ else
++ ref_type = BTRFS_TREE_BLOCK_REF_KEY;
++ init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
++ ref_root, action, ref_type);
++ ref->root = ref_root;
++ ref->parent = parent;
++ ref->level = level;
++
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+ if (!head_ref)
+ goto free_ref;
+@@ -854,10 +823,15 @@ int btrfs_add_delayed_tree_ref(struct bt
+ bytenr, num_bytes, 0, 0, action, 0,
+ is_system, old_ref_mod, new_ref_mod);
+
+- add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+- num_bytes, parent, ref_root, level, action);
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ spin_unlock(&delayed_refs->lock);
+
++ trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
++ if (ret > 0)
++ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
++
+ return 0;
+
+ free_head_ref:
diff --git a/patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch b/patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch
new file mode 100644
index 0000000000..f6a44d9606
--- /dev/null
+++ b/patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch
@@ -0,0 +1,123 @@
+From cd7f9699b113434467434580ebb8d9b328152fb8 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:21 +0300
+Git-commit: cd7f9699b113434467434580ebb8d9b328152fb8
+Patch-mainline: v4.18-rc1
+References: bsc#1134813
+Subject: [PATCH 5/8] btrfs: Open-code add_delayed_data_ref
+
+Now that the initialization part and the critical section code have been
+split, it's a lot easier to open-code add_delayed_data_ref. Do so in the
+following manner:
+
+1. The common init function is put immediately after memory-to-be-initialized
+ is allocated, followed by the specific data ref initialization.
+
+2. The only piece of code that remains in the critical section is
+ insert_delayed_ref call.
+
+3. Tracing and memory freeing code is moved outside of the critical
+ section.
+
+No functional changes, just an overall shorter critical section.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 62 +++++++++++++++----------------------------------
+ 1 file changed, 19 insertions(+), 43 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -725,46 +725,6 @@ static void init_delayed_ref_common(stru
+ }
+
+ /*
+- * helper to insert a delayed data ref into the rbtree.
+- */
+-static noinline void
+-add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+- struct btrfs_trans_handle *trans,
+- struct btrfs_delayed_ref_head *head_ref,
+- struct btrfs_delayed_ref_node *ref, u64 bytenr,
+- u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
+- u64 offset, int action)
+-{
+- struct btrfs_delayed_data_ref *full_ref;
+- struct btrfs_delayed_ref_root *delayed_refs;
+- u8 ref_type;
+- int ret;
+-
+- delayed_refs = &trans->transaction->delayed_refs;
+-
+- full_ref = btrfs_delayed_node_to_data_ref(ref);
+- if (parent)
+- ref_type = BTRFS_SHARED_DATA_REF_KEY;
+- else
+- ref_type = BTRFS_EXTENT_DATA_REF_KEY;
+-
+- init_delayed_ref_common(trans->fs_info, ref, bytenr, num_bytes,
+- ref_root, action, ref_type);
+- full_ref->root = ref_root;
+- full_ref->parent = parent;
+- full_ref->objectid = owner;
+- full_ref->offset = offset;
+-
+- trace_add_delayed_data_ref(trans->fs_info, ref, full_ref,
+- action == BTRFS_ADD_DELAYED_EXTENT ?
+- BTRFS_ADD_DELAYED_REF : action);
+-
+- ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+- if (ret > 0)
+- kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
+-}
+-
+-/*
+ * add a delayed tree ref. This does all of the accounting required
+ * to make sure the delayed ref is eventually processed before this
+ * transaction commits.
+@@ -857,12 +817,25 @@ int btrfs_add_delayed_data_ref(struct bt
+ struct btrfs_delayed_ref_head *head_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
++ int ret;
++ u8 ref_type;
+
+ BUG_ON(extent_op && !extent_op->is_data);
+ ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+
++ if (parent)
++ ref_type = BTRFS_SHARED_DATA_REF_KEY;
++ else
++ ref_type = BTRFS_EXTENT_DATA_REF_KEY;
++ init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
++ ref_root, action, ref_type);
++ ref->root = ref_root;
++ ref->parent = parent;
++ ref->objectid = owner;
++ ref->offset = offset;
++
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+ if (!head_ref) {
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+@@ -893,11 +866,14 @@ int btrfs_add_delayed_data_ref(struct bt
+ bytenr, num_bytes, ref_root, reserved,
+ action, 1, 0, old_ref_mod, new_ref_mod);
+
+- add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+- num_bytes, parent, ref_root, owner, offset,
+- action);
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ spin_unlock(&delayed_refs->lock);
+
++ trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
++ if (ret > 0)
++ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+ return 0;
+ }
+
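
After patches 4/8 and 5/8 both btrfs_add_delayed_*_ref() entry points share the same shape: allocate and fully initialize outside the lock, keep only the insertion in the critical section, then trace and free afterwards. A condensed sketch of the data-ref variant from the hunks above (error handling elided):

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);	/* step 1: init early */

	spin_lock(&delayed_refs->lock);
	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node); /* step 2 */
	spin_unlock(&delayed_refs->lock);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,	/* step 3 */
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)	/* merged into an existing ref: free the duplicate */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
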
diff --git a/patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch b/patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch
new file mode 100644
index 0000000000..5b0f6c7be5
--- /dev/null
+++ b/patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch
@@ -0,0 +1,97 @@
+From a2e569b3f2b138f2c25b4598cf4b18af8af39abd Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:22 +0300
+Git-commit: a2e569b3f2b138f2c25b4598cf4b18af8af39abd
+Patch-mainline: v4.18-rc1
+References: bsc#1134813
+Subject: [PATCH 6/8] btrfs: Introduce init_delayed_ref_head
+
+add_delayed_ref_head implements the logic both to initialize a head_ref
+structure and to perform the necessary operations to add it to the
+delayed ref machinery. This has resulted in a very cumbersome interface
+with loads of parameters and code which, at first glance, looks very
+unwieldy. Begin untangling it by first extracting the initialization-only
+code into its own function. It's a more or less verbatim copy of the
+first part of add_delayed_ref_head.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 65 insertions(+)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -542,6 +542,71 @@ update_existing_head_ref(struct btrfs_de
+ spin_unlock(&existing->lock);
+ }
+
++static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
++ struct btrfs_qgroup_extent_record *qrecord,
++ u64 bytenr, u64 num_bytes, u64 ref_root,
++ u64 reserved, int action, bool is_data,
++ bool is_system)
++{
++ int count_mod = 1;
++ int must_insert_reserved = 0;
++
++ /* If reserved is provided, it must be a data extent. */
++ BUG_ON(!is_data && reserved);
++
++ /*
++ * The head node stores the sum of all the mods, so dropping a ref
++ * should drop the sum in the head node by one.
++ */
++ if (action == BTRFS_UPDATE_DELAYED_HEAD)
++ count_mod = 0;
++ else if (action == BTRFS_DROP_DELAYED_REF)
++ count_mod = -1;
++
++ /*
++ * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
++ * accounting when the extent is finally added, or if a later
++ * modification deletes the delayed ref without ever inserting the
++ * extent into the extent allocation tree. ref->must_insert_reserved
++ * is the flag used to record that accounting mods are required.
++ *
++ * Once we record must_insert_reserved, switch the action to
++ * BTRFS_ADD_DELAYED_REF because other special casing is not required.
++ */
++ if (action == BTRFS_ADD_DELAYED_EXTENT)
++ must_insert_reserved = 1;
++ else
++ must_insert_reserved = 0;
++
++ atomic_set(&head_ref->refs, 1);
++ head_ref->bytenr = bytenr;
++ head_ref->num_bytes = num_bytes;
++ head_ref->ref_mod = count_mod;
++ head_ref->must_insert_reserved = must_insert_reserved;
++ head_ref->is_data = is_data;
++ head_ref->is_system = is_system;
++ head_ref->ref_tree = RB_ROOT;
++ INIT_LIST_HEAD(&head_ref->ref_add_list);
++ RB_CLEAR_NODE(&head_ref->href_node);
++ head_ref->processing = 0;
++ head_ref->total_ref_mod = count_mod;
++ head_ref->qgroup_reserved = 0;
++ head_ref->qgroup_ref_root = 0;
++ spin_lock_init(&head_ref->lock);
++ mutex_init(&head_ref->mutex);
++
++ if (qrecord) {
++ if (ref_root && reserved) {
++ head_ref->qgroup_ref_root = ref_root;
++ head_ref->qgroup_reserved = reserved;
++ }
++
++ qrecord->bytenr = bytenr;
++ qrecord->num_bytes = num_bytes;
++ qrecord->old_roots = NULL;
++ }
++}
++
+ /*
+ * helper function to actually insert a head node into the rbtree.
+ * this does all the dirty work in terms of maintaining the correct
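In isolation, the pattern applied by this patch is the classic extract-constructor
refactoring: a function that both builds and publishes an object is split so
that construction becomes side-effect free. A minimal sketch of that shape in
plain C follows; the struct and names are hypothetical stand-ins, not the
kernel's own:

#include <stdio.h>

/* Hypothetical miniature of a delayed ref head, not the kernel struct. */
struct head {
	long bytenr;
	long num_bytes;
	int ref_mod;
};

/* Initialisation only: fill in every field, touch no shared state.
 * This mirrors what init_delayed_ref_head extracts from the combined
 * init-and-insert code path. */
static void init_head(struct head *h, long bytenr, long num_bytes, int action)
{
	h->bytenr = bytenr;
	h->num_bytes = num_bytes;
	h->ref_mod = (action < 0) ? -1 : 1;	/* cf. the count_mod logic */
}

/* Publication only: hand the fully built head to shared structures
 * (stands in for the rbtree insert done under the delayed-refs lock). */
static void add_head(const struct head *h)
{
	printf("inserted head at %ld (%ld bytes, mod %d)\n",
	       h->bytenr, h->num_bytes, h->ref_mod);
}

int main(void)
{
	struct head h;

	init_head(&h, 4096, 16384, 1);	/* build first, no locks needed */
	add_head(&h);			/* then publish */
	return 0;
}

The payoff shows up in patch 8 of the series: the pure init step can run
before the delayed_refs spinlock is taken, leaving only the insertion inside
the critical section.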
diff --git a/patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch b/patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch
new file mode 100644
index 0000000000..d02233c4fc
--- /dev/null
+++ b/patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch
@@ -0,0 +1,100 @@
+From eb86ec73b968b2895ffede893b33bf49bbc9bf5c Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:23 +0300
+Git-commit: eb86ec73b968b2895ffede893b33bf49bbc9bf5c
+Patch-mainline: v4.18-rc1
+References: bsc#1134813
+Subject: [PATCH 7/8] btrfs: Use init_delayed_ref_head in add_delayed_ref_head
+
+Use the newly introduced function when initialising the head_ref in
+add_delayed_ref_head. No functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 59 +++----------------------------------------------
+ 1 file changed, 4 insertions(+), 55 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -624,67 +624,16 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ {
+ struct btrfs_delayed_ref_head *existing;
+ struct btrfs_delayed_ref_root *delayed_refs;
+- int count_mod = 1;
+- int must_insert_reserved = 0;
+-
+- /* If reserved is provided, it must be a data extent. */
+- BUG_ON(!is_data && reserved);
+-
+- /*
+- * the head node stores the sum of all the mods, so dropping a ref
+- * should drop the sum in the head node by one.
+- */
+- if (action == BTRFS_UPDATE_DELAYED_HEAD)
+- count_mod = 0;
+- else if (action == BTRFS_DROP_DELAYED_REF)
+- count_mod = -1;
+-
+- /*
+- * BTRFS_ADD_DELAYED_EXTENT means that we need to update
+- * the reserved accounting when the extent is finally added, or
+- * if a later modification deletes the delayed ref without ever
+- * inserting the extent into the extent allocation tree.
+- * ref->must_insert_reserved is the flag used to record
+- * that accounting mods are required.
+- *
+- * Once we record must_insert_reserved, switch the action to
+- * BTRFS_ADD_DELAYED_REF because other special casing is not required.
+- */
+- if (action == BTRFS_ADD_DELAYED_EXTENT)
+- must_insert_reserved = 1;
+- else
+- must_insert_reserved = 0;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+
+- atomic_set(&head_ref->refs, 1);
+- head_ref->bytenr = bytenr;
+- head_ref->num_bytes = num_bytes;
+- head_ref->ref_mod = count_mod;
+- head_ref->must_insert_reserved = must_insert_reserved;
+- head_ref->is_data = is_data;
+- head_ref->is_system = is_system;
+- head_ref->ref_tree = RB_ROOT;
+- INIT_LIST_HEAD(&head_ref->ref_add_list);
+- RB_CLEAR_NODE(&head_ref->href_node);
+- head_ref->processing = 0;
+- head_ref->total_ref_mod = count_mod;
+- head_ref->qgroup_reserved = 0;
+- head_ref->qgroup_ref_root = 0;
+- spin_lock_init(&head_ref->lock);
+ mutex_init(&head_ref->mutex);
+
++ init_delayed_ref_head(head_ref, qrecord, bytenr, num_bytes, ref_root,
++ reserved, action, is_data, is_system);
++
+ /* Record qgroup extent info if provided */
+ if (qrecord) {
+- if (ref_root && reserved) {
+- head_ref->qgroup_ref_root = ref_root;
+- head_ref->qgroup_reserved = reserved;
+- }
+-
+- qrecord->bytenr = bytenr;
+- qrecord->num_bytes = num_bytes;
+- qrecord->old_roots = NULL;
+-
+ if(btrfs_qgroup_trace_extent_nolock(fs_info,
+ delayed_refs, qrecord))
+ kfree(qrecord);
+@@ -708,7 +657,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ } else {
+ if (old_ref_mod)
+ *old_ref_mod = 0;
+- if (is_data && count_mod < 0)
++ if (is_data && head_ref->ref_mod < 0)
+ delayed_refs->pending_csums += num_bytes;
+ delayed_refs->num_heads++;
+ delayed_refs->num_heads_ready++;
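Note the last hunk above: with the local count_mod gone, the pending_csums
test now reads head_ref->ref_mod back off the already-initialised head.
Pushing state onto the object this way is what lets the next patch shrink the
function signature. A toy before/after sketch, again with hypothetical names
rather than the kernel's:

#include <stdio.h>

struct head { long bytenr; long num_bytes; int ref_mod; int is_data; };

/* Old shape: callers re-supply values the head already carries. */
static void publish_old(const struct head *h, long num_bytes, int is_data)
{
	if (is_data && h->ref_mod < 0)
		printf("pending_csums += %ld\n", num_bytes);
}

/* New shape: everything is read back off the initialised head, so the
 * extra parameters can be dropped from the signature. */
static void publish_new(const struct head *h)
{
	if (h->is_data && h->ref_mod < 0)
		printf("pending_csums += %ld\n", h->num_bytes);
}

int main(void)
{
	struct head h = { 4096, 16384, -1, 1 };

	publish_old(&h, h.num_bytes, h.is_data);	/* same output ... */
	publish_new(&h);				/* ... fewer args */
	return 0;
}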
diff --git a/patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch b/patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch
new file mode 100644
index 0000000000..0f6ed6d96c
--- /dev/null
+++ b/patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch
@@ -0,0 +1,157 @@
+From 2335efafa63f0c675ebb4f8908fff9e972fb8a58 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:24 +0300
+Git-commit: 2335efafa63f0c675ebb4f8908fff9e972fb8a58
+Patch-mainline: v4.18-rc1
+References: bsc#1134813
+Subject: [PATCH 8/8] btrfs: split delayed ref head initialization and addition
+
+add_delayed_ref_head really performed 2 independent operations -
+initialising the ref head and adding it to a list. Now that the init
+part is in a separate function, let's complete the separation between
+both operations. This results in a much simpler interface for
+add_delayed_ref_head, since the function now deals solely with either
+adding the newly initialised delayed ref head or merging it into an
+existing one. It also results in a vastly simplified function
+signature, since 5 arguments are dropped. The only other thing worth
+mentioning is that, due to this split, the WARN_ON catching
+reinitialisation of an existing head has to change. In this patch the
+condition is extended such that:
+
+ qrecord && head_ref->qgroup_ref_root && head_ref->qgroup_reserved
+
+is added. This is done because the two qgroup_* prefixed members are set
+only if both ref_root and reserved are passed, so functionally it's
+equivalent to the old WARN_ON and allows removing both args.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 40 +++++++++++++++++++---------------------
+ 1 file changed, 19 insertions(+), 21 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -617,8 +617,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_qgroup_extent_record *qrecord,
+- u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
+- int action, int is_data, int is_system, int *old_ref_mod,
++ int action, int *old_ref_mod,
+ int *new_ref_mod)
+
+ {
+@@ -629,9 +628,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+
+ mutex_init(&head_ref->mutex);
+
+- init_delayed_ref_head(head_ref, qrecord, bytenr, num_bytes, ref_root,
+- reserved, action, is_data, is_system);
+-
+ /* Record qgroup extent info if provided */
+ if (qrecord) {
+ if(btrfs_qgroup_trace_extent_nolock(fs_info,
+@@ -644,7 +640,9 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ existing = htree_insert(&delayed_refs->href_root,
+ &head_ref->href_node);
+ if (existing) {
+- WARN_ON(ref_root && reserved && existing->qgroup_ref_root
++ WARN_ON(qrecord && head_ref->qgroup_ref_root
++ && head_ref->qgroup_reserved
++ && existing->qgroup_ref_root
+ && existing->qgroup_reserved);
+ update_existing_head_ref(delayed_refs, existing, head_ref,
+ old_ref_mod);
+@@ -657,8 +655,8 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ } else {
+ if (old_ref_mod)
+ *old_ref_mod = 0;
+- if (is_data && head_ref->ref_mod < 0)
+- delayed_refs->pending_csums += num_bytes;
++ if (head_ref->is_data && head_ref->ref_mod < 0)
++ delayed_refs->pending_csums += head_ref->num_bytes;
+ delayed_refs->num_heads++;
+ delayed_refs->num_heads_ready++;
+ atomic_inc(&delayed_refs->num_entries);
+@@ -666,6 +664,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ }
+ if (new_ref_mod)
+ *new_ref_mod = head_ref->total_ref_mod;
++
+ return head_ref;
+ }
+
+@@ -736,7 +735,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ struct btrfs_delayed_ref_head *head_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+- int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
++ bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+ int ret;
+ u8 ref_type;
+
+@@ -766,6 +765,8 @@ int btrfs_add_delayed_tree_ref(struct bt
+ goto free_head_ref;
+ }
+
++ init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
++ ref_root, 0, action, false, is_system);
+ head_ref->extent_op = extent_op;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+@@ -776,8 +777,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ * the spin lock
+ */
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+- bytenr, num_bytes, 0, 0, action, 0,
+- is_system, old_ref_mod, new_ref_mod);
++ action, old_ref_mod, new_ref_mod);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ spin_unlock(&delayed_refs->lock);
+@@ -849,6 +849,8 @@ int btrfs_add_delayed_data_ref(struct bt
+ }
+ }
+
++ init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
++ reserved, action, true, false);
+ head_ref->extent_op = extent_op;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+@@ -859,8 +861,7 @@ int btrfs_add_delayed_data_ref(struct bt
+ * the spin lock
+ */
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+- bytenr, num_bytes, ref_root, reserved,
+- action, 1, 0, old_ref_mod, new_ref_mod);
++ action, old_ref_mod, new_ref_mod);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ spin_unlock(&delayed_refs->lock);
+@@ -913,19 +914,16 @@ int btrfs_add_delayed_extent_op(struct b
+ if (!head_ref)
+ return -ENOMEM;
+
++ init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
++ BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
++ false);
+ head_ref->extent_op = extent_op;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+ spin_lock(&delayed_refs->lock);
+
+- /*
+- * extent_ops just modify the flags of an extent and they don't result
+- * in ref count changes, hence it's safe to pass false/0 for is_system
+- * argument
+- */
+- add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
+- num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
+- extent_op->is_data, 0, NULL, NULL);
++ add_delayed_ref_head(fs_info, trans, head_ref, NULL,
++ BTRFS_UPDATE_DELAYED_HEAD, NULL, NULL);
+
+ spin_unlock(&delayed_refs->lock);
+ return 0;
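The claimed equivalence of the old and new WARN_ON guards follows from
init_delayed_ref_head (patch 6): the head's qgroup_* fields are non-zero only
when a qrecord plus both ref_root and reserved are supplied. A small
self-contained check of that claim, reduced to hypothetical miniature types
rather than the kernel structures:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Miniature of the fields init_delayed_ref_head sets: the qgroup_*
 * members become non-zero only when a qrecord is present and both
 * ref_root and reserved were passed in. */
struct mini_head {
	unsigned long long qgroup_ref_root;
	unsigned long long qgroup_reserved;
};

static void mini_init(struct mini_head *h, bool qrecord,
		      unsigned long long ref_root,
		      unsigned long long reserved)
{
	h->qgroup_ref_root = 0;
	h->qgroup_reserved = 0;
	if (qrecord && ref_root && reserved) {
		h->qgroup_ref_root = ref_root;
		h->qgroup_reserved = reserved;
	}
}

int main(void)
{
	/* Exhaust all zero/non-zero combinations of the three inputs. */
	for (int q = 0; q <= 1; q++) {
		for (unsigned long long root = 0; root <= 5; root += 5) {
			for (unsigned long long res = 0; res <= 7; res += 7) {
				struct mini_head h;

				mini_init(&h, q, root, res);
				/* New guard: qrecord && head fields set. */
				bool new_guard = q && h.qgroup_ref_root &&
						 h.qgroup_reserved;
				/* It fires exactly when all three inputs
				 * were given, as the commit message argues. */
				assert(new_guard == (q && root && res));
			}
		}
	}
	puts("new WARN_ON guard matches the inputs-based condition");
	return 0;
}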
diff --git a/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch b/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
index 3a02b8fa0e..91563d322f 100644
--- a/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
+++ b/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
@@ -51,13 +51,13 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
---
fs/btrfs/delayed-ref.c | 16 ++++++++++++----
fs/btrfs/delayed-ref.h | 1 +
- fs/btrfs/extent-tree.c | 19 ++++++++++++-------
- 3 files changed, 25 insertions(+), 11 deletions(-)
+ fs/btrfs/extent-tree.c | 20 +++++++++++++-------
+ 3 files changed, 26 insertions(+), 11 deletions(-)
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
-@@ -562,8 +562,9 @@ add_delayed_ref_head(struct btrfs_fs_inf
- struct btrfs_delayed_ref_node *ref,
+@@ -553,8 +553,9 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
- int action, int is_data, int *old_ref_mod,
@@ -66,16 +66,16 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
+
{
struct btrfs_delayed_ref_head *existing;
- struct btrfs_delayed_ref_head *head_ref = NULL;
-@@ -615,6 +616,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
- head_ref = btrfs_delayed_node_to_head(ref);
+ struct btrfs_delayed_ref_root *delayed_refs;
+@@ -597,6 +598,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ head_ref->ref_mod = count_mod;
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
+ head_ref->is_system = is_system;
head_ref->ref_tree = RB_ROOT;
INIT_LIST_HEAD(&head_ref->ref_add_list);
- head_ref->processing = 0;
-@@ -797,6 +799,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ RB_CLEAR_NODE(&head_ref->href_node);
+@@ -779,6 +781,7 @@ int btrfs_add_delayed_tree_ref(struct bt
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
@@ -83,25 +83,25 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
BUG_ON(extent_op && extent_op->is_data);
ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
-@@ -825,7 +828,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -807,7 +810,7 @@ int btrfs_add_delayed_tree_ref(struct bt
*/
- head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
bytenr, num_bytes, 0, 0, action, 0,
- old_ref_mod, new_ref_mod);
+ is_system, old_ref_mod, new_ref_mod);
add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
num_bytes, parent, ref_root, level, action);
-@@ -890,7 +893,7 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -872,7 +875,7 @@ int btrfs_add_delayed_data_ref(struct bt
*/
- head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
bytenr, num_bytes, ref_root, reserved,
- action, 1, old_ref_mod, new_ref_mod);
+ action, 1, 0, old_ref_mod, new_ref_mod);
add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
num_bytes, parent, ref_root, owner, offset,
-@@ -945,9 +948,14 @@ int btrfs_add_delayed_extent_op(struct b
+@@ -927,9 +930,14 @@ int btrfs_add_delayed_extent_op(struct b
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
@@ -110,7 +110,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
+ * in ref count changes, hence it's safe to pass false/0 for is_system
+ * argument
+ */
- add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
+ add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
- extent_op->is_data, NULL, NULL);
+ extent_op->is_data, 0, NULL, NULL);
@@ -129,20 +129,20 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
-@@ -2525,14 +2525,19 @@ static int cleanup_ref_head(struct btrfs
- head->node.action);
+@@ -2524,13 +2524,19 @@ static int cleanup_ref_head(struct btrfs
+ trace_run_delayed_ref_head(fs_info, head, 0);
if (head->total_ref_mod < 0) {
- struct btrfs_block_group_cache *cache;
-
-- cache = btrfs_lookup_block_group(fs_info, head->node.bytenr);
+- cache = btrfs_lookup_block_group(fs_info, head->bytenr);
- ASSERT(cache);
- percpu_counter_add(&cache->space_info->total_bytes_pinned,
-- -head->node.num_bytes);
+- -head->num_bytes);
- btrfs_put_block_group(cache);
+ struct btrfs_space_info *space_info;
+ u64 flags;
-
++
+ if (head->is_data)
+ flags = BTRFS_BLOCK_GROUP_DATA;
+ else if (head->is_system)
@@ -152,7 +152,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
+ space_info = __find_space_info(fs_info, flags);
+ ASSERT(space_info);
+ percpu_counter_add(&space_info->total_bytes_pinned,
-+ -head->node.num_bytes);
++ -head->num_bytes);
+
if (head->is_data) {
spin_lock(&delayed_refs->lock);
- delayed_refs->pending_csums -= head->node.num_bytes;
diff --git a/series.conf b/series.conf
index 4e91f36add..67fd91fd35 100644
--- a/series.conf
+++ b/series.conf
@@ -23307,6 +23307,7 @@
patches.suse/0003-btrfs-breakout-empty-head-cleanup-to-a-helper.patch
patches.suse/0004-btrfs-move-ref_mod-modification-into-the-if-ref-logi.patch
patches.suse/0005-btrfs-move-all-ref-head-cleanup-to-the-helper-functi.patch
+ patches.suse/0001-btrfs-remove-delayed_ref_node-from-ref_head.patch
patches.suse/0020-btrfs-remove-type-argument-from-comp_tree_refs.patch
patches.suse/0016-btrfs-cleanup-extent-locking-sequence.patch
patches.suse/0017-Btrfs-rework-outstanding_extents.patch
@@ -24457,6 +24458,14 @@
patches.drivers/nvme-fabrics-allow-internal-passthrough-command-on-d.patch
patches.fixes/restore-cond_resched-in-shrink_dcache_parent.patch
patches.fixes/rmdir-rename-do-shrink_dcache_parent-only-on-success.patch
+ patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch
+ patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch
+ patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch
+ patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch
+ patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch
+ patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch
+ patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch
+ patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch
patches.fixes/0001-btrfs-qgroup-Search-commit-root-for-rescan-to-avoid-.patch
patches.fixes/0001-dlm-fix-a-clerical-error-when-set-SCTP_NODELAY.patch
patches.fixes/0002-dlm-make-sctp_connect_to_sock-return-in-specified-ti.patch