author     Jiri Slaby <jslaby@suse.cz>  2014-09-07 06:40:17 +0200
committer  Jiri Slaby <jslaby@suse.cz>  2014-09-07 06:40:17 +0200
commit     dcee3975936b33996dd656617da8e1879ef4dcbd (patch)
tree       566492fd757f92db25607278d87c86250a7db801
parent     2ab65856e31615961771d3a3f9ec60b3ef0e6eec (diff)
- Linux 3.16.2 (bko#81111 bnc#887046 bnc#889790).    rpm-3.16.2-1.1.gdcee397
- Update config files.
- Refresh patches.xen/xen-x86-EFI.
- Refresh patches.xen/xen-x86-bzImage.
- Refresh patches.xen/xen3-auto-common.diff.
- Refresh patches.xen/xen3-patch-2.6.37.
- Delete patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32.
- Delete patches.fixes/drm-omapdrm-fix-compiler-errors.
- Delete patches.fixes/nfs-nfs3_list_one_acl-check-get_acl-result-with-is_err_or_null.
- Delete patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch.
- Delete patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch.
- Delete patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch.
- Delete patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch.
-rw-r--r--  config/i386/xen  5
-rw-r--r--  config/x86_64/xen  4
-rw-r--r--  patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32  114
-rw-r--r--  patches.fixes/drm-omapdrm-fix-compiler-errors  113
-rw-r--r--  patches.fixes/nfs-nfs3_list_one_acl-check-get_acl-result-with-is_err_or_null  32
-rw-r--r--  patches.kernel.org/patch-3.16.1-2  6076
-rw-r--r--  patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch  30
-rw-r--r--  patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch  41
-rw-r--r--  patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch  104
-rw-r--r--  patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch  567
-rw-r--r--  patches.xen/xen-x86-EFI  2
-rw-r--r--  patches.xen/xen-x86-bzImage  2
-rw-r--r--  patches.xen/xen3-auto-common.diff  4
-rw-r--r--  patches.xen/xen3-patch-2.6.37  6
-rw-r--r--  series.conf  9
15 files changed, 6091 insertions, 1018 deletions
diff --git a/config/i386/xen b/config/i386/xen
index a6e22f166f..c9de00f154 100644
--- a/config/i386/xen
+++ b/config/i386/xen
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/i386 3.16.0 Kernel Configuration
+# Linux/i386 3.16.2 Kernel Configuration
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
@@ -453,6 +453,9 @@ CONFIG_HZ=250
CONFIG_SCHED_HRTICK=y
CONFIG_KEXEC=y
CONFIG_PHYSICAL_START=0x2000
+CONFIG_RELOCATABLE=y
+# CONFIG_RANDOMIZE_BASE is not set
+CONFIG_X86_NEED_RELOCS=y
CONFIG_PHYSICAL_ALIGN=0x2000
CONFIG_XEN_BZIMAGE=y
CONFIG_HOTPLUG_CPU=y
diff --git a/config/x86_64/xen b/config/x86_64/xen
index fdf5c9b48d..185ad29094 100644
--- a/config/x86_64/xen
+++ b/config/x86_64/xen
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.16.0 Kernel Configuration
+# Linux/x86_64 3.16.2 Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -439,6 +439,8 @@ CONFIG_HZ=250
CONFIG_SCHED_HRTICK=y
CONFIG_KEXEC=y
CONFIG_PHYSICAL_START=0x4000
+CONFIG_RELOCATABLE=y
+# CONFIG_RANDOMIZE_BASE is not set
CONFIG_PHYSICAL_ALIGN=0x4000
CONFIG_XEN_BZIMAGE=y
CONFIG_HOTPLUG_CPU=y
diff --git a/patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32 b/patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32
deleted file mode 100644
index 760a656763..0000000000
--- a/patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32
+++ /dev/null
@@ -1,114 +0,0 @@
-From 4eb1f66dce6c4dc28dd90a7ffbe6b2b1cb08aa4e Mon Sep 17 00:00:00 2001
-From: Takashi Iwai <tiwai@suse.de>
-Date: Mon, 28 Jul 2014 10:57:04 +0200
-Subject: [PATCH] Btrfs: Fix memory corruption by ulist_add_merge() on 32bit arch
-Git-commit: 4eb1f66dce6c4dc28dd90a7ffbe6b2b1cb08aa4e
-Patch-mainline: 3.17-rc1
-References: bnc#887046
-
-We've got bug reports that btrfs crashes when quota is enabled on
-32bit kernel, typically with the Oops like below:
- BUG: unable to handle kernel NULL pointer dereference at 00000004
- IP: [<f9234590>] find_parent_nodes+0x360/0x1380 [btrfs]
- *pde = 00000000
- Oops: 0000 [#1] SMP
- CPU: 0 PID: 151 Comm: kworker/u8:2 Tainted: G S W 3.15.2-1.gd43d97e-default #1
- Workqueue: btrfs-qgroup-rescan normal_work_helper [btrfs]
- task: f1478130 ti: f147c000 task.ti: f147c000
- EIP: 0060:[<f9234590>] EFLAGS: 00010213 CPU: 0
- EIP is at find_parent_nodes+0x360/0x1380 [btrfs]
- EAX: f147dda8 EBX: f147ddb0 ECX: 00000011 EDX: 00000000
- ESI: 00000000 EDI: f147dda4 EBP: f147ddf8 ESP: f147dd38
- DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
- CR0: 8005003b CR2: 00000004 CR3: 00bf3000 CR4: 00000690
- Stack:
- 00000000 00000000 f147dda4 00000050 00000001 00000000 00000001 00000050
- 00000001 00000000 d3059000 00000001 00000022 000000a8 00000000 00000000
- 00000000 000000a1 00000000 00000000 00000001 00000000 00000000 11800000
- Call Trace:
- [<f923564d>] __btrfs_find_all_roots+0x9d/0xf0 [btrfs]
- [<f9237bb1>] btrfs_qgroup_rescan_worker+0x401/0x760 [btrfs]
- [<f9206148>] normal_work_helper+0xc8/0x270 [btrfs]
- [<c025e38b>] process_one_work+0x11b/0x390
- [<c025eea1>] worker_thread+0x101/0x340
- [<c026432b>] kthread+0x9b/0xb0
- [<c0712a71>] ret_from_kernel_thread+0x21/0x30
- [<c0264290>] kthread_create_on_node+0x110/0x110
-
-This indicates a NULL corruption in prefs_delayed list. The further
-investigation and bisection pointed that the call of ulist_add_merge()
-results in the corruption.
-
-ulist_add_merge() takes u64 as aux and writes a 64bit value into
-old_aux. The callers of this function in backref.c, however, pass a
-pointer of a pointer to old_aux. That is, the function overwrites
-64bit value on 32bit pointer. This caused a NULL in the adjacent
-variable, in this case, prefs_delayed.
-
-Here is a quick attempt to band-aid over this: a new function,
-ulist_add_merge_ptr() is introduced to pass/store properly a pointer
-value instead of u64. There are still ugly void ** cast remaining
-in the callers because void ** cannot be taken implicitly. But, it's
-safer than explicit cast to u64, anyway.
-
-Bugzilla: https://bugzilla.novell.com/show_bug.cgi?id=887046
-Cc: <stable@vger.kernel.org> [v3.11+]
-Signed-off-by: Takashi Iwai <tiwai@suse.de>
-Signed-off-by: Chris Mason <clm@fb.com>
-
----
- fs/btrfs/backref.c | 11 +++++------
- fs/btrfs/ulist.h | 15 +++++++++++++++
- 2 files changed, 20 insertions(+), 6 deletions(-)
-
---- a/fs/btrfs/backref.c
-+++ b/fs/btrfs/backref.c
-@@ -276,9 +276,8 @@ static int add_all_parents(struct btrfs_
- }
- if (ret > 0)
- goto next;
-- ret = ulist_add_merge(parents, eb->start,
-- (uintptr_t)eie,
-- (u64 *)&old, GFP_NOFS);
-+ ret = ulist_add_merge_ptr(parents, eb->start,
-+ eie, (void **)&old, GFP_NOFS);
- if (ret < 0)
- break;
- if (!ret && extent_item_pos) {
-@@ -1011,9 +1010,9 @@ again:
- goto out;
- ref->inode_list = eie;
- }
-- ret = ulist_add_merge(refs, ref->parent,
-- (uintptr_t)ref->inode_list,
-- (u64 *)&eie, GFP_NOFS);
-+ ret = ulist_add_merge_ptr(refs, ref->parent,
-+ ref->inode_list,
-+ (void **)&eie, GFP_NOFS);
- if (ret < 0)
- goto out;
- if (!ret && extent_item_pos) {
---- a/fs/btrfs/ulist.h
-+++ b/fs/btrfs/ulist.h
-@@ -57,6 +57,21 @@ void ulist_free(struct ulist *ulist);
- int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
- int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
- u64 *old_aux, gfp_t gfp_mask);
-+
-+/* just like ulist_add_merge() but take a pointer for the aux data */
-+static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux,
-+ void **old_aux, gfp_t gfp_mask)
-+{
-+#if BITS_PER_LONG == 32
-+ u64 old64 = (uintptr_t)*old_aux;
-+ int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
-+ *old_aux = (void *)((uintptr_t)old64);
-+ return ret;
-+#else
-+ return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
-+#endif
-+}
-+
- struct ulist_node *ulist_next(struct ulist *ulist,
- struct ulist_iterator *uiter);
-
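
An illustrative sketch of the corruption mechanism described in the changelog
above (not taken from the patch), assuming a little-endian 32-bit build where
pointers are 4 bytes wide. The struct and field names here are hypothetical;
only the (u64 *) cast mirrors the buggy ulist_add_merge() call sites.

    #include <stdint.h>
    #include <stdio.h>

    struct frame {
            void *old;            /* 4 bytes on a 32-bit build */
            void *prefs_delayed;  /* laid out immediately after it */
    };

    int main(void)
    {
            struct frame f = { NULL, (void *)0x1234 };

            /* Buggy pattern: hand (u64 *)&old to code that stores a u64. */
            *(uint64_t *)&f.old = 0xdeadbeef;

            /* The upper 32 bits of the u64 (zero here) spill into the
             * neighbouring field, turning prefs_delayed into NULL, which
             * is exactly the corruption seen in the Oops above. */
            printf("prefs_delayed = %p\n", f.prefs_delayed);
            return 0;
    }
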
diff --git a/patches.fixes/drm-omapdrm-fix-compiler-errors b/patches.fixes/drm-omapdrm-fix-compiler-errors
deleted file mode 100644
index a72003423f..0000000000
--- a/patches.fixes/drm-omapdrm-fix-compiler-errors
+++ /dev/null
@@ -1,113 +0,0 @@
-From 2d31ca3ad7d5d44c8adc7f253c96ce33f3a2e931 Mon Sep 17 00:00:00 2001
-From: Russell King <rmk+kernel@arm.linux.org.uk>
-Date: Sat, 12 Jul 2014 10:53:41 +0100
-Subject: [PATCH] drm: omapdrm: fix compiler errors
-Git-commit: 2d31ca3ad7d5d44c8adc7f253c96ce33f3a2e931
-Patch-mainline: 3.17-rc1
-
-Regular randconfig nightly testing has detected problems with omapdrm.
-
-omapdrm fails to build when the kernel is built to support 64-bit DMA
-addresses and/or 64-bit physical addresses due to an assumption about
-the width of these types.
-
-Use %pad to print DMA addresses, rather than %x or %Zx (which is even
-more wrong than %x). Avoid passing a uint32_t pointer into a function
-which expects dma_addr_t pointer.
-
-drivers/gpu/drm/omapdrm/omap_plane.c: In function 'omap_plane_pre_apply':
-drivers/gpu/drm/omapdrm/omap_plane.c:145:2: error: format '%x' expects argument of type 'unsigned int', but argument 5 has type 'dma_addr_t' [-Werror=format]
-drivers/gpu/drm/omapdrm/omap_plane.c:145:2: error: format '%x' expects argument of type 'unsigned int', but argument 6 has type 'dma_addr_t' [-Werror=format]
-make[5]: *** [drivers/gpu/drm/omapdrm/omap_plane.o] Error 1
-drivers/gpu/drm/omapdrm/omap_gem.c: In function 'omap_gem_get_paddr':
-drivers/gpu/drm/omapdrm/omap_gem.c:794:4: error: format '%x' expects argument of type 'unsigned int', but argument 3 has type 'dma_addr_t' [-Werror=format]
-drivers/gpu/drm/omapdrm/omap_gem.c: In function 'omap_gem_describe':
-drivers/gpu/drm/omapdrm/omap_gem.c:991:4: error: format '%Zx' expects argument of type 'size_t', but argument 7 has type 'dma_addr_t' [-Werror=format]
-drivers/gpu/drm/omapdrm/omap_gem.c: In function 'omap_gem_init':
-drivers/gpu/drm/omapdrm/omap_gem.c:1470:4: error: format '%x' expects argument of type 'unsigned int', but argument 7 has type 'dma_addr_t' [-Werror=format]
-make[5]: *** [drivers/gpu/drm/omapdrm/omap_gem.o] Error 1
-drivers/gpu/drm/omapdrm/omap_dmm_tiler.c: In function 'dmm_txn_append':
-drivers/gpu/drm/omapdrm/omap_dmm_tiler.c:226:2: error: passing argument 3 of 'alloc_dma' from incompatible pointer type [-Werror]
-make[5]: *** [drivers/gpu/drm/omapdrm/omap_dmm_tiler.o] Error 1
-make[5]: Target `__build' not remade because of errors.
-make[4]: *** [drivers/gpu/drm/omapdrm] Error 2
-
-Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-Signed-off-by: Dave Airlie <airlied@redhat.com>
-Acked-by: Takashi Iwai <tiwai@suse.de>
-
----
- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 6 ++++--
- drivers/gpu/drm/omapdrm/omap_gem.c | 10 +++++-----
- drivers/gpu/drm/omapdrm/omap_plane.c | 4 ++--
- 3 files changed, 11 insertions(+), 9 deletions(-)
-
---- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
-+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
-@@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(stru
- static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
- struct page **pages, uint32_t npages, uint32_t roll)
- {
-- dma_addr_t pat_pa = 0;
-+ dma_addr_t pat_pa = 0, data_pa = 0;
- uint32_t *data;
- struct pat *pat;
- struct refill_engine *engine = txn->engine_handle;
-@@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_tx
- .lut_id = engine->tcm->lut_id,
- };
-
-- data = alloc_dma(txn, 4*i, &pat->data_pa);
-+ data = alloc_dma(txn, 4*i, &data_pa);
-+ /* FIXME: what if data_pa is more than 32-bit ? */
-+ pat->data_pa = data_pa;
-
- while (i--) {
- int n = i + roll;
---- a/drivers/gpu/drm/omapdrm/omap_gem.c
-+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
-@@ -791,7 +791,7 @@ int omap_gem_get_paddr(struct drm_gem_ob
- omap_obj->paddr = tiler_ssptr(block);
- omap_obj->block = block;
-
-- DBG("got paddr: %08x", omap_obj->paddr);
-+ DBG("got paddr: %pad", &omap_obj->paddr);
- }
-
- omap_obj->paddr_cnt++;
-@@ -985,9 +985,9 @@ void omap_gem_describe(struct drm_gem_ob
-
- off = drm_vma_node_start(&obj->vma_node);
-
-- seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
-+ seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
- omap_obj->flags, obj->name, obj->refcount.refcount.counter,
-- off, omap_obj->paddr, omap_obj->paddr_cnt,
-+ off, &omap_obj->paddr, omap_obj->paddr_cnt,
- omap_obj->vaddr, omap_obj->roll);
-
- if (omap_obj->flags & OMAP_BO_TILED) {
-@@ -1467,8 +1467,8 @@ void omap_gem_init(struct drm_device *de
- entry->paddr = tiler_ssptr(block);
- entry->block = block;
-
-- DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
-- entry->paddr,
-+ DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
-+ &entry->paddr,
- usergart[i].stride_pfn << PAGE_SHIFT);
- }
- }
---- a/drivers/gpu/drm/omapdrm/omap_plane.c
-+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
-@@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct
- DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
- info->out_width, info->out_height,
- info->screen_width);
-- DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
-- info->paddr, info->p_uv_addr);
-+ DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
-+ &info->paddr, &info->p_uv_addr);
-
- /* TODO: */
- ilace = false;
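
An illustrative sketch of the point of the fix above (not taken from the
patch): dma_addr_t may be wider than unsigned int, so it must not be printed
with %x or %Zx; the kernel's %pad specifier takes the address of the variable
and prints it at the correct width on both 32- and 64-bit builds. The variable
name below is hypothetical:

    dma_addr_t paddr = dma_handle;              /* hypothetical DMA handle */

    /* Wrong: truncates (and triggers -Wformat) when dma_addr_t is 64-bit. */
    pr_debug("paddr=%08x\n", (unsigned int)paddr);

    /* Right: pass a pointer and let vsprintf pick the width. */
    pr_debug("paddr=%pad\n", &paddr);
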
diff --git a/patches.fixes/nfs-nfs3_list_one_acl-check-get_acl-result-with-is_err_or_null b/patches.fixes/nfs-nfs3_list_one_acl-check-get_acl-result-with-is_err_or_null
deleted file mode 100644
index c0d7fa5b0b..0000000000
--- a/patches.fixes/nfs-nfs3_list_one_acl-check-get_acl-result-with-is_err_or_null
+++ /dev/null
@@ -1,32 +0,0 @@
-From: Andrey Utkin <andrey.krieger.utkin@gmail.com>
-Subject: nfs: nfs3_list_one_acl(): check get_acl() result with IS_ERR_OR_NULL
-Patch-mainline: Submitted to LKML 24 Jul 2014, reviewed by Trond and HCH
-References: bko#81111 bnc#889790
-
-There was a check for result being not NULL. But get_acl() may return
-NULL, or ERR_PTR, or actual pointer.
-The purpose of the function where current change is done is to "list
-ACLs only when they are available", so any error condition of get_acl()
-mustn't be elevated, and returning 0 there is still valid.
-
-Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=81111
-Signed-off-by: Andrey Utkin <andrey.krieger.utkin@gmail.com>
-Acked-by: Jeff Mahoney <jeffm@suse.com>
----
- fs/nfs/nfs3acl.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
-index 8f854dd..d0fec26 100644
---- a/fs/nfs/nfs3acl.c
-+++ b/fs/nfs/nfs3acl.c
-@@ -256,7 +256,7 @@ nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
- char *p = data + *result;
-
- acl = get_acl(inode, type);
-- if (!acl)
-+ if (IS_ERR_OR_NULL(acl))
- return 0;
-
- posix_acl_release(acl);
-
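
An illustrative sketch of the pattern restored above (not taken from the
patch): get_acl() has three possible outcomes, a valid ACL, NULL, or an
ERR_PTR()-encoded error; for "list ACLs only when they are available" the
latter two are both treated as nothing to list.

    struct posix_acl *acl = get_acl(inode, type);

    if (IS_ERR_OR_NULL(acl))
            return 0;            /* no ACL, or an error deliberately ignored here */

    posix_acl_release(acl);
    /* ... report the xattr name/length to the caller ... */
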
diff --git a/patches.kernel.org/patch-3.16.1-2 b/patches.kernel.org/patch-3.16.1-2
new file mode 100644
index 0000000000..5a001092dc
--- /dev/null
+++ b/patches.kernel.org/patch-3.16.1-2
@@ -0,0 +1,6076 @@
+From: Jiri Slaby <jslaby@suse.cz>
+Subject: Linux 3.16.2
+Patch-mainline: 3.16.2
+References: bko#81111 bnc#887046 bnc#889790
+Git-commit: a9ef803d740bfadf5e505fbc57efa57692e27025
+Git-commit: 4449a51a7c281602d3a385044ab928322a122a02
+Git-commit: aee7af356e151494d5014f57b33460b162f181b5
+Git-commit: 412f6c4c26fb1eba8844290663837561ac53fa6e
+Git-commit: f87d928f6d98644d39809a013a22f981d39017cf
+Git-commit: 3c45ddf823d679a820adddd53b52c6699c9a05ac
+Git-commit: 71a6ec8ac587418ceb6b420def1ca44b334c1ff7
+Git-commit: 7a9e75a185e6b3a3860e6a26fb6e88691fc2c9d9
+Git-commit: d9499a95716db0d4bc9b67e88fd162133e7d6b08
+Git-commit: dd5f5006d1035547559c8a90781a7e249787a7a2
+Git-commit: bdd405d2a5287bdb9b04670ea255e1f122138e66
+Git-commit: 5cbcc35e5bf0eae3c7494ce3efefffc9977827ae
+Git-commit: 6817ae225cd650fb1c3295d769298c38b1eba818
+Git-commit: 646907f5bfb0782c731ae9ff6fb63471a3566132
+Git-commit: 6552cc7f09261db2aeaae389aa2c05a74b3a93b4
+Git-commit: cc824534d4fef0e46e4486d5c1e10d3c6b1ebadc
+Git-commit: e21eba05afd288a227320f797864ddd859397eed
+Git-commit: 365038d83313951d6ace15342eb24624bbef1666
+Git-commit: 2597fe99bb0259387111d0431691f5daac84f5a5
+Git-commit: 9a54886342e227433aebc9d374f8ae268a836475
+Git-commit: a2fa6721c7237b5a666f16f732628c0c09c0b954
+Git-commit: 8626d524ef08f10fccc0c41e5f75aef8235edf47
+Git-commit: ec0a38bf8b28b036202070cf3ef271e343d9eafc
+Git-commit: e409842a03b0c2c41c0959fef8a7563208af36c1
+Git-commit: db9ee220361de03ee86388f9ea5e529eaad5323c
+Git-commit: 022eaa7517017efe4f6538750c2b59a804dc7df7
+Git-commit: d80d448c6c5bdd32605b78a60fe8081d82d4da0f
+Git-commit: 6603120e96eae9a5d6228681ae55c7fdc998d1bb
+Git-commit: c174e6d6979a04b7b77b93f244396be4b81f8bfb
+Git-commit: 69dc9536405213c1d545fcace1fc15c481d00aae
+Git-commit: 4631dbf677ded0419fee35ca7408285dabfaef1a
+Git-commit: 8e8248b1369c97c7bb6f8bcaee1f05deeabab8ef
+Git-commit: 73ab4232388b7a08f17c8d08141ff2099fa0b161
+Git-commit: 9e0af23764344f7f1b68e4eefbe7dc865018b63d
+Git-commit: f6dc45c7a93a011dff6eb9b2ffda59c390c7705a
+Git-commit: 38c1c2e44bacb37efd68b90b3f70386a8ee370ee
+Git-commit: 8d875f95da43c6a8f18f77869f2ef26e9594fecc
+Git-commit: ce62003f690dff38d3164a632ec69efa15c32cbf
+Git-commit: 6f7ff6d7832c6be13e8c95598884dbc40ad69fb7
+Git-commit: 27b9a8122ff71a8cadfbffb9c4f0694300464f3b
+Git-commit: 4eb1f66dce6c4dc28dd90a7ffbe6b2b1cb08aa4e
+Git-commit: 0758f4f732b08b6ef07f2e5f735655cf69fea477
+Git-commit: b38af4721f59d0b564468f623b3e52a638195015
+Git-commit: 8d5999df35314607c38fbd6bdd709e25c3a4eeab
+Git-commit: 7d951f3ccb0308c95bf76d5eef9886dea35a7013
+Git-commit: 7b2a583afb4ab894f78bc0f8bd136e96b6499a7e
+Git-commit: dcecb8fd93a65787130a74e61fdf29932c8d85eb
+Git-commit: ed5c41d30ef2ce578fd6b6e2f7ec23f2a58b1eba
+Git-commit: 0b9e7b741f2bf8103b15bb14d5b4a6f5ee91c59a
+Git-commit: 53b884ac3745353de220d92ef792515c3ae692f0
+Git-commit: a32305bf90a2ae0e6a9a93370c7616565f75e15a
+Git-commit: 7340056567e32b2c9d3554eb146e1977c93da116
+Git-commit: 9e5c6e5a3be0b2e17ff61b9b74adef4a2c9e6934
+Git-commit: cbace46a9710a480cae51e4611697df5de41713e
+Git-commit: dcfa9be83866e28fcb8b7e22b4eeb4ba63bd3174
+Git-commit: 0d25d35c987d7b0b63368d9c1ae35a917e1a7bab
+Git-commit: c33377082dd9ede1e998f7ce416077e4b1c2276c
+Git-commit: 1f6ae47ecff7f23da73417e068018b311f3b5583
+Git-commit: 37dbeab788a8f23fd946c0be083e5484d6f929a1
+Git-commit: 5fc540edc8ea1297c76685f74bc82a2107fe6731
+Git-commit: 6dc14baf4ced769017c7a7045019c7a19f373865
+Git-commit: c99d1e6e83b06744c75d9f5e491ed495a7086b7b
+Git-commit: 350b8bdd689cd2ab2c67c8a86a0be86cfa0751a7
+Git-commit: 0d234daf7e0a3290a3a20c8087eefbd6335a5bd4
+Git-commit: 56cc2406d68c0f09505c389e276f27a99f495cbd
+Git-commit: a0840240c0c6bcbac8f0f5db11f95c19aaf9b52f
+Git-commit: 55e4283c3eb1d850893f645dd695c9c75d5fa1fc
+Git-commit: 0f6c0a740b7d3e1f3697395922d674000f83d060
+Git-commit: 9e8919ae793f4edfaa29694a70f71a515ae9942a
+Git-commit: 485d44022a152c0254dd63445fdb81c4194cbf0e
+Git-commit: e1f8859ee265fc89bd21b4dca79e8e983a044892
+Git-commit: ae84db9661cafc63d179e1d985a2c5b841ff0ac4
+Git-commit: 86f0afd463215fc3e58020493482faa4ac3a4d69
+Git-commit: 4f579ae7de560e5f449587a6c3f02594d53d4d51
+Git-commit: 9c5f7cad3acc69ce623d04d646950183a759949e
+Git-commit: 28772ac8711e4d7268c06e765887dd8cb6924f98
+Git-commit: f07a5e9a331045e976a3d317ba43d14859d9407c
+Git-commit: 5b963089161b8fb244889c972edf553b9d737545
+Git-commit: d58e47d787c09fe5c61af3c6ce7d784762f29c3d
+Git-commit: e981429557cbe10c780fab1c1a237cb832757652
+Git-commit: 3248c3b771ddd9d31695da17ba350eb6e1b80a53
+Git-commit: 56de1377ad92f72ee4e5cb0faf7a9b6048fdf0bf
+Git-commit: 2565fb05d1e9fc0831f7b1c083bcfcb1cba1f020
+Git-commit: 1074d683a51f1aded3562add9ef313e75d557327
+Git-commit: cf44819c98db11163f58f08b822d626c7a8f5188
+Git-commit: cc336546ddca8c22de83720632431c16a5f9fe9a
+Git-commit: ad82bfea44835da9633548e2031a1af4a9965c14
+Git-commit: 1cb9da502835dad73dda772b20c1e792f4e71589
+Git-commit: 2d31ca3ad7d5d44c8adc7f253c96ce33f3a2e931
+Git-commit: 9b5f7428f8b16bd8980213f2b70baf1dd0b9e36c
+Git-commit: bc994c77ce82576209dcf08f71de9ae51b0b100f
+Git-commit: 44e6ab1b619853f05bf7250e55a6d82864e340d7
+Git-commit: 6a7519e81321343165f89abb8b616df186d3e57a
+Git-commit: c878e0cff5c5e56b216951cbe75f7a3dd500a736
+Git-commit: 7f0b1bf04511348995d6fce38c87c98a3b5cb781
+Git-commit: d8d28c8f00e84a72e8bee39a85835635417bee49
+Git-commit: 8f873c1ff4ca034626093d03b254e7cb8bb782dd
+Git-commit: fe2f17eb3da38ac0d5a00c511255bf3a33d16d24
+Git-commit: d5d83f8abea13d0b50ee762276c6c900d1946264
+Git-commit: 22b987a325701223f9a37db700c6eb20b9924c6f
+Git-commit: 3e37ebb7183f0c4eb92a88c60657ac319c01b3e9
+Git-commit: f3ee07d8b6e061bf34a7167c3f564e8da4360a99
+Git-commit: f475371aa65de84fa483a998ab7594531026b9d9
+Git-commit: 423044744aa4c250058e976474856a7a41972182
+Git-commit: 53da5ebfef66ea6e478ad9c6add3781472b79475
+Git-commit: e24aa0a4c5ac92a171d9dd74a8d3dbf652990d36
+Git-commit: f42bb22243d2ae264d721b055f836059fe35321f
+Git-commit: 542baf94ec3c5526955b4c9fd899c7f30fae4ebe
+Git-commit: 7440850c20b69658f322119d20a94dc914127cc7
+Git-commit: a40178b2fa6ad87670fb1e5fa4024db00c149629
+Git-commit: 6e693739e9b603b3ca9ce0d4f4178f0633458465
+Git-commit: 4bdcde358b4bda74e356841d351945ca3f2245dd
+Git-commit: 9273b8a270878906540349422ab24558b9d65716
+Git-commit: d310d05f1225d1f6f2bf505255fdf593bfbb3051
+Git-commit: 5ee0f803cc3a0738a63288e4a2f453c85889fbda
+Git-commit: 977dcfdc60311e7aa571cabf6f39c36dde13339e
+Git-commit: 256dbcd80f1ccf8abf421c1d72ba79a4e29941dd
+Git-commit: e2875c33787ebda21aeecc1a9d3ff52b3aa413ec
+Git-commit: 410dd3cf4c9b36f27ed4542ee18b1af5e68645a4
+Git-commit: 4ab25786c87eb20857bbb715c3ae34ec8fd6a214
+Git-commit: ad3e14d7c5268c2e24477c6ef54bbdf88add5d36
+Git-commit: 51217e69697fba92a06e07e16f55c9a52d8e8945
+Git-commit: c3b9b945e02e011c63522761e91133ea43eb6939
+Git-commit: b76fc285337b6b256e9ba20a40cfd043f70c27af
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
+index 7ccf933bfbe0..48148d6d9307 100644
+--- a/Documentation/sound/alsa/ALSA-Configuration.txt
++++ b/Documentation/sound/alsa/ALSA-Configuration.txt
+@@ -2026,8 +2026,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
+ -------------------
+
+ Module for sound cards based on the Asus AV66/AV100/AV200 chips,
+- i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX,
+- HDAV1.3 (Deluxe), and HDAV1.3 Slim.
++ i.e., Xonar D1, DX, D2, D2X, DS, DSX, Essence ST (Deluxe),
++ Essence STX (II), HDAV1.3 (Deluxe), and HDAV1.3 Slim.
+
+ This module supports autoprobe and multiple cards.
+
+diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
+index cbc2f03056bd..aee73e78c7d4 100644
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -29,6 +29,9 @@ Rules on what kind of patches are accepted, and which ones are not, into the
+
+ Procedure for submitting patches to the -stable tree:
+
++ - If the patch covers files in net/ or drivers/net please follow netdev stable
++ submission guidelines as described in
++ Documentation/networking/netdev-FAQ.txt
+ - Send the patch, after verifying that it follows the above rules, to
+ stable@vger.kernel.org. You must note the upstream commit ID in the
+ changelog of your submission, as well as the kernel version you wish
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index 0fe36497642c..612e6e99d1e5 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -1869,7 +1869,8 @@ registers, find a list below:
+ PPC | KVM_REG_PPC_PID | 64
+ PPC | KVM_REG_PPC_ACOP | 64
+ PPC | KVM_REG_PPC_VRSAVE | 32
+- PPC | KVM_REG_PPC_LPCR | 64
++ PPC | KVM_REG_PPC_LPCR | 32
++ PPC | KVM_REG_PPC_LPCR_64 | 64
+ PPC | KVM_REG_PPC_PPR | 64
+ PPC | KVM_REG_PPC_ARCH_COMPAT 32
+ PPC | KVM_REG_PPC_DABRX | 32
+diff --git a/Makefile b/Makefile
+index 87663a2d1d10..c2617526e605 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 16
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Museum of Fishiegoodies
+
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index 49fa59622254..c9aee0e799bb 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -168,9 +168,6 @@
+ ti,hwmods = "mailbox";
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <8>;
+- ti,mbox-names = "wkup_m3";
+- ti,mbox-data = <0 0 0 0>;
+- status = "disabled";
+ };
+
+ timer1: timer@44e31000 {
+diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
+index 43876245fc57..21ca0cebcab0 100644
+--- a/arch/arm/include/asm/unistd.h
++++ b/arch/arm/include/asm/unistd.h
+@@ -15,7 +15,17 @@
+
+ #include <uapi/asm/unistd.h>
+
++/*
++ * This may need to be greater than __NR_last_syscall+1 in order to
++ * account for the padding in the syscall table
++ */
+ #define __NR_syscalls (384)
++
++/*
++ * *NOTE*: This is a ghost syscall private to the kernel. Only the
++ * __kuser_cmpxchg code in entry-armv.S should be aware of its
++ * existence. Don't ever use this from user code.
++ */
+ #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
+
+ #define __ARCH_WANT_STAT64
+diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
+index ba94446c72d9..acd5b66ea3aa 100644
+--- a/arch/arm/include/uapi/asm/unistd.h
++++ b/arch/arm/include/uapi/asm/unistd.h
+@@ -411,11 +411,6 @@
+ #define __NR_renameat2 (__NR_SYSCALL_BASE+382)
+
+ /*
+- * This may need to be greater than __NR_last_syscall+1 in order to
+- * account for the padding in the syscall table
+- */
+-
+-/*
+ * The following SWIs are ARM private.
+ */
+ #define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000)
+@@ -426,12 +421,6 @@
+ #define __ARM_NR_set_tls (__ARM_NR_BASE+5)
+
+ /*
+- * *NOTE*: This is a ghost syscall private to the kernel. Only the
+- * __kuser_cmpxchg code in entry-armv.S should be aware of its
+- * existence. Don't ever use this from user code.
+- */
+-
+-/*
+ * The following syscalls are obsolete and no longer available for EABI.
+ */
+ #if !defined(__KERNEL__)
+diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
+index 751f3549bf6f..acadac0992b6 100644
+--- a/arch/arm/mach-omap2/control.c
++++ b/arch/arm/mach-omap2/control.c
+@@ -314,7 +314,8 @@ void omap3_save_scratchpad_contents(void)
+ scratchpad_contents.public_restore_ptr =
+ virt_to_phys(omap3_restore_3630);
+ else if (omap_rev() != OMAP3430_REV_ES3_0 &&
+- omap_rev() != OMAP3430_REV_ES3_1)
++ omap_rev() != OMAP3430_REV_ES3_1 &&
++ omap_rev() != OMAP3430_REV_ES3_1_2)
+ scratchpad_contents.public_restore_ptr =
+ virt_to_phys(omap3_restore);
+ else
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 6c074f37cdd2..da1b256caccc 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -2185,6 +2185,8 @@ static int _enable(struct omap_hwmod *oh)
+ oh->mux->pads_dynamic))) {
+ omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
+ _reconfigure_io_chain();
++ } else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
++ _reconfigure_io_chain();
+ }
+
+ _add_initiator_dep(oh, mpu_oh);
+@@ -2291,6 +2293,8 @@ static int _idle(struct omap_hwmod *oh)
+ if (oh->mux && oh->mux->pads_dynamic) {
+ omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
+ _reconfigure_io_chain();
++ } else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
++ _reconfigure_io_chain();
+ }
+
+ oh->_state = _HWMOD_STATE_IDLE;
+diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
+index a5176cf32dad..f2defe1c380c 100644
+--- a/arch/arm64/include/asm/cacheflush.h
++++ b/arch/arm64/include/asm/cacheflush.h
+@@ -138,19 +138,10 @@ static inline void __flush_icache_all(void)
+ #define flush_icache_page(vma,page) do { } while (0)
+
+ /*
+- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+- * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
+- * caches, since the direct-mappings of these pages may contain cached
+- * data, we need to do a full cache flush to ensure that writebacks
+- * don't corrupt data placed into these pages via the new mappings.
++ * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
+ */
+ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+ {
+- /*
+- * set_pte_at() called from vmap_pte_range() does not
+- * have a DSB after cleaning the cache line.
+- */
+- dsb(ish);
+ }
+
+ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index e0ccceb317d9..2a1508cdead0 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -138,6 +138,8 @@ extern struct page *empty_zero_page;
+
+ #define pte_valid_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
++#define pte_valid_not_user(pte) \
++ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+
+ static inline pte_t pte_wrprotect(pte_t pte)
+ {
+@@ -184,6 +186,15 @@ static inline pte_t pte_mkspecial(pte_t pte)
+ static inline void set_pte(pte_t *ptep, pte_t pte)
+ {
+ *ptep = pte;
++
++ /*
++ * Only if the new pte is valid and kernel, otherwise TLB maintenance
++ * or update_mmu_cache() have the necessary barriers.
++ */
++ if (pte_valid_not_user(pte)) {
++ dsb(ishst);
++ isb();
++ }
+ }
+
+ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
+@@ -303,6 +314,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
+ *pmdp = pmd;
+ dsb(ishst);
++ isb();
+ }
+
+ static inline void pmd_clear(pmd_t *pmdp)
+@@ -333,6 +345,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
+ {
+ *pudp = pud;
+ dsb(ishst);
++ isb();
+ }
+
+ static inline void pud_clear(pud_t *pudp)
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
+index b9349c4513ea..3796ea6bb734 100644
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -122,6 +122,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+ asm("tlbi vaae1is, %0" : : "r"(addr));
+ dsb(ish);
++ isb();
+ }
+
+ /*
+@@ -131,8 +132,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+ {
+ /*
+- * set_pte() does not have a DSB, so make sure that the page table
+- * write is visible.
++ * set_pte() does not have a DSB for user mappings, so make sure that
++ * the page table write is visible.
+ */
+ dsb(ishst);
+ }
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index a7fb874b595e..fe5b94078d82 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -315,20 +315,20 @@ static int brk_handler(unsigned long addr, unsigned int esr,
+ {
+ siginfo_t info;
+
+- if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+- return 0;
++ if (user_mode(regs)) {
++ info = (siginfo_t) {
++ .si_signo = SIGTRAP,
++ .si_errno = 0,
++ .si_code = TRAP_BRKPT,
++ .si_addr = (void __user *)instruction_pointer(regs),
++ };
+
+- if (!user_mode(regs))
++ force_sig_info(SIGTRAP, &info, current);
++ } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
++ pr_warning("Unexpected kernel BRK exception at EL1\n");
+ return -EFAULT;
++ }
+
+- info = (siginfo_t) {
+- .si_signo = SIGTRAP,
+- .si_errno = 0,
+- .si_code = TRAP_BRKPT,
+- .si_addr = (void __user *)instruction_pointer(regs),
+- };
+-
+- force_sig_info(SIGTRAP, &info, current);
+ return 0;
+ }
+
+diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
+index 14db1f6e8d7f..c0aead7d1a72 100644
+--- a/arch/arm64/kernel/efi.c
++++ b/arch/arm64/kernel/efi.c
+@@ -464,6 +464,8 @@ static int __init arm64_enter_virtual_mode(void)
+
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
++ efi.runtime_version = efi.systab->hdr.revision;
++
+ return 0;
+ }
+ early_initcall(arm64_enter_virtual_mode);
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 736c17a226e9..bf0fc6b16ad9 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -1827,7 +1827,7 @@ dcopuop:
+ case -1:
+
+ if (cpu_has_mips_4_5_r)
+- cbit = fpucondbit[MIPSInst_RT(ir) >> 2];
++ cbit = fpucondbit[MIPSInst_FD(ir) >> 2];
+ else
+ cbit = FPU_CSR_COND;
+ if (rv.w)
+diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
+index 2bc4a9409a93..de7d426a9b0c 100644
+--- a/arch/powerpc/include/uapi/asm/kvm.h
++++ b/arch/powerpc/include/uapi/asm/kvm.h
+@@ -548,6 +548,7 @@ struct kvm_get_htab_header {
+
+ #define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+ #define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
++#define KVM_REG_PPC_LPCR_64 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb5)
+ #define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
+
+ /* Architecture compatibility level */
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index fbd01eba4473..94802d267022 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -802,53 +802,33 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
+ */
+ const char *eeh_pe_loc_get(struct eeh_pe *pe)
+ {
+- struct pci_controller *hose;
+ struct pci_bus *bus = eeh_pe_bus_get(pe);
+- struct pci_dev *pdev;
+- struct device_node *dn;
+- const char *loc;
++ struct device_node *dn = pci_bus_to_OF_node(bus);
++ const char *loc = NULL;
+
+- if (!bus)
+- return "N/A";
++ if (!dn)
++ goto out;
+
+ /* PHB PE or root PE ? */
+ if (pci_is_root_bus(bus)) {
+- hose = pci_bus_to_host(bus);
+- loc = of_get_property(hose->dn,
+- "ibm,loc-code", NULL);
+- if (loc)
+- return loc;
+- loc = of_get_property(hose->dn,
+- "ibm,io-base-loc-code", NULL);
++ loc = of_get_property(dn, "ibm,loc-code", NULL);
++ if (!loc)
++ loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
+ if (loc)
+- return loc;
+-
+- pdev = pci_get_slot(bus, 0x0);
+- } else {
+- pdev = bus->self;
+- }
+-
+- if (!pdev) {
+- loc = "N/A";
+- goto out;
+- }
++ goto out;
+
+- dn = pci_device_to_OF_node(pdev);
+- if (!dn) {
+- loc = "N/A";
+- goto out;
++ /* Check the root port */
++ dn = dn->child;
++ if (!dn)
++ goto out;
+ }
+
+ loc = of_get_property(dn, "ibm,loc-code", NULL);
+ if (!loc)
+ loc = of_get_property(dn, "ibm,slot-location-code", NULL);
+- if (!loc)
+- loc = "N/A";
+
+ out:
+- if (pci_is_root_bus(bus) && pdev)
+- pci_dev_put(pdev);
+- return loc;
++ return loc ? loc : "N/A";
+ }
+
+ /**
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 7a12edbb61e7..0f3a19237444 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -785,7 +785,8 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
+ return 0;
+ }
+
+-static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
++static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
++ bool preserve_top32)
+ {
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 mask;
+@@ -820,6 +821,10 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ mask |= LPCR_AIL;
++
++ /* Broken 32-bit version of LPCR must not clear top bits */
++ if (preserve_top32)
++ mask &= 0xFFFFFFFF;
+ vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
+ spin_unlock(&vc->lock);
+ }
+@@ -939,6 +944,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
+ break;
+ case KVM_REG_PPC_LPCR:
++ case KVM_REG_PPC_LPCR_64:
+ *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+ break;
+ case KVM_REG_PPC_PPR:
+@@ -1150,7 +1156,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ ALIGN(set_reg_val(id, *val), 1UL << 24);
+ break;
+ case KVM_REG_PPC_LPCR:
+- kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
++ kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
++ break;
++ case KVM_REG_PPC_LPCR_64:
++ kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
+ break;
+ case KVM_REG_PPC_PPR:
+ vcpu->arch.ppr = set_reg_val(id, *val);
+diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
+index 8eef1e519077..66b7afec250f 100644
+--- a/arch/powerpc/kvm/book3s_pr.c
++++ b/arch/powerpc/kvm/book3s_pr.c
+@@ -1233,6 +1233,7 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
+ *val = get_reg_val(id, to_book3s(vcpu)->hior);
+ break;
+ case KVM_REG_PPC_LPCR:
++ case KVM_REG_PPC_LPCR_64:
+ /*
+ * We are only interested in the LPCR_ILE bit
+ */
+@@ -1268,6 +1269,7 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
+ to_book3s(vcpu)->hior_explicit = true;
+ break;
+ case KVM_REG_PPC_LPCR:
++ case KVM_REG_PPC_LPCR_64:
+ kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
+ break;
+ default:
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index de19edeaa7a7..3136ae2f75af 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -491,6 +491,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+ set_dma_ops(&pdev->dev, &dma_iommu_ops);
+ set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ }
++ *pdev->dev.dma_mask = dma_mask;
+ return 0;
+ }
+
+diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
+index 203cbf0dc101..89e23811199c 100644
+--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
++++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
+@@ -118,10 +118,10 @@ int remove_phb_dynamic(struct pci_controller *phb)
+ }
+ }
+
+- /* Unregister the bridge device from sysfs and remove the PCI bus */
+- device_unregister(b->bridge);
++ /* Remove the PCI bus and unregister the bridge device from sysfs */
+ phb->bus = NULL;
+ pci_remove_bus(b);
++ device_unregister(b->bridge);
+
+ /* Now release the IO resource */
+ if (res->flags & IORESOURCE_IO)
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 37b8241ec784..f90ad8592b36 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -1279,6 +1279,7 @@ static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
+ {
+ unsigned long next, *table, *new;
+ struct page *page;
++ spinlock_t *ptl;
+ pmd_t *pmd;
+
+ pmd = pmd_offset(pud, addr);
+@@ -1296,7 +1297,7 @@ again:
+ if (!new)
+ return -ENOMEM;
+
+- spin_lock(&mm->page_table_lock);
++ ptl = pmd_lock(mm, pmd);
+ if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
+ /* Nuke pmd entry pointing to the "short" page table */
+ pmdp_flush_lazy(mm, addr, pmd);
+@@ -1310,7 +1311,7 @@ again:
+ page_table_free_rcu(tlb, table);
+ new = NULL;
+ }
+- spin_unlock(&mm->page_table_lock);
++ spin_unlock(ptl);
+ if (new) {
+ page_table_free_pgste(new);
+ goto again;
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index d24887b645dc..27adfd902c6f 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1537,6 +1537,7 @@ config EFI
+ config EFI_STUB
+ bool "EFI stub support"
+ depends on EFI
++ select RELOCATABLE
+ ---help---
+ This kernel feature allows a bzImage to be loaded directly
+ by EFI firmware without the use of a bootloader.
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 49205d01b9ad..9f83c171ac18 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
+ #define KVM_REFILL_PAGES 25
+ #define KVM_MAX_CPUID_ENTRIES 80
+ #define KVM_NR_FIXED_MTRR_REGION 88
+-#define KVM_NR_VAR_MTRR 10
++#define KVM_NR_VAR_MTRR 8
+
+ #define ASYNC_PF_PER_VCPU 64
+
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 0ec056012618..aa97a070f09f 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -131,8 +131,13 @@ static inline int pte_exec(pte_t pte)
+
+ static inline int pte_special(pte_t pte)
+ {
+- return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) ==
+- (_PAGE_PRESENT|_PAGE_SPECIAL);
++ /*
++ * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
++ * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
++ * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
++ */
++ return (pte_flags(pte) & _PAGE_SPECIAL) &&
++ (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
+ }
+
+ static inline unsigned long pte_pfn(pte_t pte)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
+index 9a316b21df8b..3bdb95ae8c43 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
+@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
+ * cmci_discover_lock protects against parallel discovery attempts
+ * which could race against each other.
+ */
+-static DEFINE_SPINLOCK(cmci_discover_lock);
++static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
+
+ #define CMCI_THRESHOLD 1
+ #define CMCI_POLL_INTERVAL (30 * HZ)
+@@ -144,14 +144,14 @@ static void cmci_storm_disable_banks(void)
+ int bank;
+ u64 val;
+
+- spin_lock_irqsave(&cmci_discover_lock, flags);
++ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ owned = __get_cpu_var(mce_banks_owned);
+ for_each_set_bit(bank, owned, MAX_NR_BANKS) {
+ rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ val &= ~MCI_CTL2_CMCI_EN;
+ wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ }
+- spin_unlock_irqrestore(&cmci_discover_lock, flags);
++ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ }
+
+ static bool cmci_storm_detect(void)
+@@ -211,7 +211,7 @@ static void cmci_discover(int banks)
+ int i;
+ int bios_wrong_thresh = 0;
+
+- spin_lock_irqsave(&cmci_discover_lock, flags);
++ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ for (i = 0; i < banks; i++) {
+ u64 val;
+ int bios_zero_thresh = 0;
+@@ -266,7 +266,7 @@ static void cmci_discover(int banks)
+ WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+ }
+ }
+- spin_unlock_irqrestore(&cmci_discover_lock, flags);
++ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
+ pr_info_once(
+ "bios_cmci_threshold: Some banks do not have valid thresholds set\n");
+@@ -316,10 +316,10 @@ void cmci_clear(void)
+
+ if (!cmci_supported(&banks))
+ return;
+- spin_lock_irqsave(&cmci_discover_lock, flags);
++ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ for (i = 0; i < banks; i++)
+ __cmci_disable_bank(i);
+- spin_unlock_irqrestore(&cmci_discover_lock, flags);
++ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ }
+
+ static void cmci_rediscover_work_func(void *arg)
+@@ -360,9 +360,9 @@ void cmci_disable_bank(int bank)
+ if (!cmci_supported(&banks))
+ return;
+
+- spin_lock_irqsave(&cmci_discover_lock, flags);
++ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ __cmci_disable_bank(bank);
+- spin_unlock_irqrestore(&cmci_discover_lock, flags);
++ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ }
+
+ static void intel_init_cmci(void)
+diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c
+index 2a26819bb6a8..80eab01c1a68 100644
+--- a/arch/x86/kernel/resource.c
++++ b/arch/x86/kernel/resource.c
+@@ -37,10 +37,12 @@ static void remove_e820_regions(struct resource *avail)
+
+ void arch_remove_reservations(struct resource *avail)
+ {
+- /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
++ /*
++ * Trim out BIOS area (high 2MB) and E820 regions. We do not remove
++ * the low 1MB unconditionally, as this area is needed for some ISA
++ * cards requiring a memory range, e.g. the i82365 PCMCIA controller.
++ */
+ if (avail->flags & IORESOURCE_MEM) {
+- if (avail->start < BIOS_END)
+- avail->start = BIOS_END;
+ resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
+
+ remove_e820_regions(avail);
+diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
+index ea5b5709aa76..e1e1e80fc6a6 100644
+--- a/arch/x86/kernel/vsyscall_64.c
++++ b/arch/x86/kernel/vsyscall_64.c
+@@ -81,10 +81,10 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+ if (!show_unhandled_signals)
+ return;
+
+- pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+- level, current->comm, task_pid_nr(current),
+- message, regs->ip, regs->cs,
+- regs->sp, regs->ax, regs->si, regs->di);
++ printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
++ level, current->comm, task_pid_nr(current),
++ message, regs->ip, regs->cs,
++ regs->sp, regs->ax, regs->si, regs->di);
+ }
+
+ static int addr_to_vsyscall_nr(unsigned long addr)
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index e4e833d3d7d7..2d3b8d0efa0f 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2017,6 +2017,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
+ unsigned long cs;
++ int cpl = ctxt->ops->cpl(ctxt);
+
+ rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+@@ -2026,6 +2027,9 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
++ /* Outer-privilege level return is not implemented */
++ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
++ return X86EMUL_UNHANDLEABLE;
+ rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
+ return rc;
+ }
+diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
+index bd0da433e6d7..a1ec6a50a05a 100644
+--- a/arch/x86/kvm/irq.c
++++ b/arch/x86/kvm/irq.c
+@@ -108,7 +108,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
+
+ vector = kvm_cpu_get_extint(v);
+
+- if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
++ if (vector != -1)
+ return vector; /* PIC */
+
+ return kvm_get_apic_interrupt(v); /* APIC */
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 006911858174..453e5fbbb7ae 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -352,25 +352,46 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
+
+ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
+ {
+- apic->irr_pending = false;
++ struct kvm_vcpu *vcpu;
++
++ vcpu = apic->vcpu;
++
+ apic_clear_vector(vec, apic->regs + APIC_IRR);
+- if (apic_search_irr(apic) != -1)
+- apic->irr_pending = true;
++ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
++ /* try to update RVI */
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++ else {
++ vec = apic_search_irr(apic);
++ apic->irr_pending = (vec != -1);
++ }
+ }
+
+ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
+ {
+- /* Note that we never get here with APIC virtualization enabled. */
++ struct kvm_vcpu *vcpu;
++
++ if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
++ return;
++
++ vcpu = apic->vcpu;
+
+- if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
+- ++apic->isr_count;
+- BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+ /*
+- * ISR (in service register) bit is set when injecting an interrupt.
+- * The highest vector is injected. Thus the latest bit set matches
+- * the highest bit in ISR.
++ * With APIC virtualization enabled, all caching is disabled
++ * because the processor can modify ISR under the hood. Instead
++ * just set SVI.
+ */
+- apic->highest_isr_cache = vec;
++ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
++ kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
++ else {
++ ++apic->isr_count;
++ BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
++ /*
++ * ISR (in service register) bit is set when injecting an interrupt.
++ * The highest vector is injected. Thus the latest bit set matches
++ * the highest bit in ISR.
++ */
++ apic->highest_isr_cache = vec;
++ }
+ }
+
+ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
+@@ -1627,11 +1648,16 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+ int vector = kvm_apic_has_interrupt(vcpu);
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+- /* Note that we never get here with APIC virtualization enabled. */
+-
+ if (vector == -1)
+ return -1;
+
++ /*
++ * We get here even with APIC virtualization enabled, if doing
++ * nested virtualization and L1 runs with the "acknowledge interrupt
++ * on exit" mode. Then we cannot inject the interrupt via RVI,
++ * because the process would deliver it through the IDT.
++ */
++
+ apic_set_isr(vector, apic);
+ apic_update_ppr(apic);
+ apic_clear_irr(vector, apic);
+diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
+index a19ed92e74e4..2ae525e0d8ba 100644
+--- a/arch/x86/pci/i386.c
++++ b/arch/x86/pci/i386.c
+@@ -162,6 +162,10 @@ pcibios_align_resource(void *data, const struct resource *res,
+ return start;
+ if (start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
++ } else if (res->flags & IORESOURCE_MEM) {
++ /* The low 1MB range is reserved for ISA cards */
++ if (start < BIOS_END)
++ start = BIOS_END;
+ }
+ return start;
+ }
+diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
+index ebfa9b2c871d..767c9cbb869f 100644
+--- a/arch/x86/xen/grant-table.c
++++ b/arch/x86/xen/grant-table.c
+@@ -168,6 +168,7 @@ static int __init xlated_setup_gnttab_pages(void)
+ {
+ struct page **pages;
+ xen_pfn_t *pfns;
++ void *vaddr;
+ int rc;
+ unsigned int i;
+ unsigned long nr_grant_frames = gnttab_max_grant_frames();
+@@ -193,21 +194,20 @@ static int __init xlated_setup_gnttab_pages(void)
+ for (i = 0; i < nr_grant_frames; i++)
+ pfns[i] = page_to_pfn(pages[i]);
+
+- rc = arch_gnttab_map_shared(pfns, nr_grant_frames, nr_grant_frames,
+- &xen_auto_xlat_grant_frames.vaddr);
+-
+- if (rc) {
++ vaddr = vmap(pages, nr_grant_frames, 0, PAGE_KERNEL);
++ if (!vaddr) {
+ pr_warn("%s Couldn't map %ld pfns rc:%d\n", __func__,
+ nr_grant_frames, rc);
+ free_xenballooned_pages(nr_grant_frames, pages);
+ kfree(pages);
+ kfree(pfns);
+- return rc;
++ return -ENOMEM;
+ }
+ kfree(pages);
+
+ xen_auto_xlat_grant_frames.pfn = pfns;
+ xen_auto_xlat_grant_frames.count = nr_grant_frames;
++ xen_auto_xlat_grant_frames.vaddr = vaddr;
+
+ return 0;
+ }
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index 7b78f88c1707..5718b0b58b60 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -444,7 +444,7 @@ void xen_setup_timer(int cpu)
+
+ irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
+ IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
+- IRQF_FORCE_RESUME,
++ IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
+ name, NULL);
+ (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
+
+diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
+index 3b7bf2162898..4669e3713428 100644
+--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
+@@ -714,6 +714,7 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ }
+
+ tpm_get_timeouts(chip);
++ tpm_do_selftest(chip);
+
+ dev_info(chip->dev, "TPM I2C Initialized\n");
+ return 0;
+diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
+index a999f537228f..92105f3dc8e0 100644
+--- a/drivers/crypto/ux500/cryp/cryp_core.c
++++ b/drivers/crypto/ux500/cryp/cryp_core.c
+@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx)
+ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+ {
+ struct cryp_ctx *ctx;
+- int i;
++ int count;
+ struct cryp_device_data *device_data;
+
+ if (param == NULL) {
+@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+ if (cryp_pending_irq_src(device_data,
+ CRYP_IRQ_SRC_OUTPUT_FIFO)) {
+ if (ctx->outlen / ctx->blocksize > 0) {
+- for (i = 0; i < ctx->blocksize / 4; i++) {
+- *(ctx->outdata) = readl_relaxed(
+- &device_data->base->dout);
+- ctx->outdata += 4;
+- ctx->outlen -= 4;
+- }
++ count = ctx->blocksize / 4;
++
++ readsl(&device_data->base->dout, ctx->outdata, count);
++ ctx->outdata += count;
++ ctx->outlen -= count;
+
+ if (ctx->outlen == 0) {
+ cryp_disable_irq_src(device_data,
+@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+ } else if (cryp_pending_irq_src(device_data,
+ CRYP_IRQ_SRC_INPUT_FIFO)) {
+ if (ctx->datalen / ctx->blocksize > 0) {
+- for (i = 0 ; i < ctx->blocksize / 4; i++) {
+- writel_relaxed(ctx->indata,
+- &device_data->base->din);
+- ctx->indata += 4;
+- ctx->datalen -= 4;
+- }
++ count = ctx->blocksize / 4;
++
++ writesl(&device_data->base->din, ctx->indata, count);
++
++ ctx->indata += count;
++ ctx->datalen -= count;
+
+ if (ctx->datalen == 0)
+ cryp_disable_irq_src(device_data,
+diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+index f926b4caf449..56c60552abba 100644
+--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+@@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
+ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+ struct page **pages, uint32_t npages, uint32_t roll)
+ {
+- dma_addr_t pat_pa = 0;
++ dma_addr_t pat_pa = 0, data_pa = 0;
+ uint32_t *data;
+ struct pat *pat;
+ struct refill_engine *engine = txn->engine_handle;
+@@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+ .lut_id = engine->tcm->lut_id,
+ };
+
+- data = alloc_dma(txn, 4*i, &pat->data_pa);
++ data = alloc_dma(txn, 4*i, &data_pa);
++ /* FIXME: what if data_pa is more than 32-bit ? */
++ pat->data_pa = data_pa;
+
+ while (i--) {
+ int n = i + roll;
+diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
+index 95dbce286a41..d9f5e5241af4 100644
+--- a/drivers/gpu/drm/omapdrm/omap_gem.c
++++ b/drivers/gpu/drm/omapdrm/omap_gem.c
+@@ -791,7 +791,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
+ omap_obj->paddr = tiler_ssptr(block);
+ omap_obj->block = block;
+
+- DBG("got paddr: %08x", omap_obj->paddr);
++ DBG("got paddr: %pad", &omap_obj->paddr);
+ }
+
+ omap_obj->paddr_cnt++;
+@@ -985,9 +985,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+
+ off = drm_vma_node_start(&obj->vma_node);
+
+- seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
++ seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
+ omap_obj->flags, obj->name, obj->refcount.refcount.counter,
+- off, omap_obj->paddr, omap_obj->paddr_cnt,
++ off, &omap_obj->paddr, omap_obj->paddr_cnt,
+ omap_obj->vaddr, omap_obj->roll);
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+@@ -1467,8 +1467,8 @@ void omap_gem_init(struct drm_device *dev)
+ entry->paddr = tiler_ssptr(block);
+ entry->block = block;
+
+- DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
+- entry->paddr,
++ DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
++ &entry->paddr,
+ usergart[i].stride_pfn << PAGE_SHIFT);
+ }
+ }
+diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
+index 3cf31ee59aac..6af3398b5278 100644
+--- a/drivers/gpu/drm/omapdrm/omap_plane.c
++++ b/drivers/gpu/drm/omapdrm/omap_plane.c
+@@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
+ DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
+ info->out_width, info->out_height,
+ info->screen_width);
+- DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
+- info->paddr, info->p_uv_addr);
++ DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
++ &info->paddr, &info->p_uv_addr);
+
+ /* TODO: */
+ ilace = false;
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index c0ea66192fe0..767f2cc44bd8 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3320,6 +3320,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
+ (rdev->pdev->device == 0x130B) ||
+ (rdev->pdev->device == 0x130E) ||
+ (rdev->pdev->device == 0x1315) ||
++ (rdev->pdev->device == 0x1318) ||
+ (rdev->pdev->device == 0x131B)) {
+ rdev->config.cik.max_cu_per_sh = 4;
+ rdev->config.cik.max_backends_per_se = 1;
+diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
+index 1bdcccc54a1d..f745d2c1325e 100644
+--- a/drivers/hid/hid-cherry.c
++++ b/drivers/hid/hid-cherry.c
+@@ -28,7 +28,7 @@
+ static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+- if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
++ if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
+ hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
+ rdesc[11] = rdesc[16] = 0xff;
+ rdesc[12] = rdesc[17] = 0x03;
+diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
+index e77696367591..b92bf01a1ae8 100644
+--- a/drivers/hid/hid-kye.c
++++ b/drivers/hid/hid-kye.c
+@@ -300,7 +300,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ * - change the button usage range to 4-7 for the extra
+ * buttons
+ */
+- if (*rsize >= 74 &&
++ if (*rsize >= 75 &&
+ rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
+ rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
+ rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
+diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
+index a976f48263f6..f91ff145db9a 100644
+--- a/drivers/hid/hid-lg.c
++++ b/drivers/hid/hid-lg.c
+@@ -345,14 +345,14 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ struct usb_device_descriptor *udesc;
+ __u16 bcdDevice, rev_maj, rev_min;
+
+- if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
++ if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 &&
+ rdesc[84] == 0x8c && rdesc[85] == 0x02) {
+ hid_info(hdev,
+ "fixing up Logitech keyboard report descriptor\n");
+ rdesc[84] = rdesc[89] = 0x4d;
+ rdesc[85] = rdesc[90] = 0x10;
+ }
+- if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
++ if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 &&
+ rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
+ rdesc[49] == 0x81 && rdesc[50] == 0x06) {
+ hid_info(hdev,
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 486dbde2ba2d..b7ba82960c79 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -238,13 +238,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ return;
+ }
+
+- if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+- (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+- dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
+- __func__, dj_report->device_index);
+- return;
+- }
+-
+ if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
+ /* The device is already known. No need to reallocate it. */
+ dbg_hid("%s: device is already known\n", __func__);
+@@ -557,7 +550,7 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
+ if (!out_buf)
+ return -ENOMEM;
+
+- if (count < DJREPORT_SHORT_LENGTH - 2)
++ if (count > DJREPORT_SHORT_LENGTH - 2)
+ count = DJREPORT_SHORT_LENGTH - 2;
+
+ out_buf[0] = REPORT_ID_DJ_SHORT;
+@@ -690,6 +683,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
+ * device (via hid_input_report() ) and return 1 so hid-core does not do
+ * anything else with it.
+ */
++ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
++ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
++ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
++ __func__, dj_report->device_index);
++ return false;
++ }
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
+diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
+index 9e14c00eb1b6..25daf28b26bd 100644
+--- a/drivers/hid/hid-monterey.c
++++ b/drivers/hid/hid-monterey.c
+@@ -24,7 +24,7 @@
+ static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+- if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
++ if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
+ hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
+ rdesc[30] = 0x0c;
+ }
+diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
+index 736b2502df4f..6aca4f2554bf 100644
+--- a/drivers/hid/hid-petalynx.c
++++ b/drivers/hid/hid-petalynx.c
+@@ -25,7 +25,7 @@
+ static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+- if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
++ if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
+ rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
+ rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
+ hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
+diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
+index 87fc91e1c8de..91072fa54663 100644
+--- a/drivers/hid/hid-sunplus.c
++++ b/drivers/hid/hid-sunplus.c
+@@ -24,7 +24,7 @@
+ static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+- if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
++ if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
+ rdesc[106] == 0x03) {
+ hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
+ rdesc[105] = rdesc[110] = 0x03;
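The hid-cherry, hid-kye, hid-lg, hid-monterey, hid-petalynx and hid-sunplus hunks above all tighten the same off-by-one: a report-descriptor fixup that writes byte N must require *rsize >= N + 1, not *rsize >= N. Below is a minimal standalone sketch of the corrected pattern, reusing the hid-cherry values (indices 11, 12, 16 and 17, hence a minimum size of 18); the surrounding program is illustrative only and not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Patching bytes up to index 17 requires at least 18 bytes; a ">= 17"
 * check would let rdesc[17] be written one byte past the buffer. */
static void fixup_descriptor(uint8_t *rdesc, unsigned int rsize)
{
	if (rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
		rdesc[11] = rdesc[16] = 0xff;
		rdesc[12] = rdesc[17] = 0x03;
	}
}

int main(void)
{
	uint8_t desc[18] = { [11] = 0x3c, [12] = 0x02 };

	fixup_descriptor(desc, sizeof(desc));
	printf("rdesc[17] = 0x%02x\n", desc[17]);
	return 0;
}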
+diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
+index 7f9dc2f86b63..126516414c11 100644
+--- a/drivers/hwmon/ads1015.c
++++ b/drivers/hwmon/ads1015.c
+@@ -198,7 +198,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
+ }
+
+ channel = be32_to_cpup(property);
+- if (channel > ADS1015_CHANNELS) {
++ if (channel >= ADS1015_CHANNELS) {
+ dev_err(&client->dev,
+ "invalid channel index %d on %s\n",
+ channel, node->full_name);
+@@ -212,6 +212,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
+ dev_err(&client->dev,
+ "invalid gain on %s\n",
+ node->full_name);
++ return -EINVAL;
+ }
+ }
+
+@@ -222,6 +223,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
+ dev_err(&client->dev,
+ "invalid data_rate on %s\n",
+ node->full_name);
++ return -EINVAL;
+ }
+ }
+
+diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
+index 9f2be3dd28f3..8a67ec6279a4 100644
+--- a/drivers/hwmon/amc6821.c
++++ b/drivers/hwmon/amc6821.c
+@@ -360,11 +360,13 @@ static ssize_t set_pwm1_enable(
+ if (config)
+ return config;
+
++ mutex_lock(&data->update_lock);
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+- return config;
++ count = config;
++ goto unlock;
+ }
+
+ switch (val) {
+@@ -381,14 +383,15 @@ static ssize_t set_pwm1_enable(
+ config |= AMC6821_CONF1_FDRC1;
+ break;
+ default:
+- return -EINVAL;
++ count = -EINVAL;
++ goto unlock;
+ }
+- mutex_lock(&data->update_lock);
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ count = -EIO;
+ }
++unlock:
+ mutex_unlock(&data->update_lock);
+ return count;
+ }
+@@ -493,8 +496,9 @@ static ssize_t set_temp_auto_point_temp(
+ return -EINVAL;
+ }
+
+- data->valid = 0;
+ mutex_lock(&data->update_lock);
++ data->valid = 0;
++
+ switch (ix) {
+ case 0:
+ ptemp[0] = clamp_val(val / 1000, 0,
+@@ -658,13 +662,14 @@ static ssize_t set_fan1_div(
+ if (config)
+ return config;
+
++ mutex_lock(&data->update_lock);
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+- return config;
++ count = config;
++ goto EXIT;
+ }
+- mutex_lock(&data->update_lock);
+ switch (val) {
+ case 2:
+ config &= ~AMC6821_CONF4_PSPR;
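The amc6821 hunks above move the configuration-register read inside update_lock, so the read-modify-write of CONF1/CONF4 cannot race with a concurrent store, and they convert the early returns into a single unlock path. A rough userspace analogue of that structure follows; read_reg()/write_reg(), the shadow register and the 0x01 mode bit are stand-ins invented for the example, not the driver's actual I2C helpers.

#include <pthread.h>
#include <stdio.h>

static int shadow_reg = 0x10;	/* pretend hardware register */
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static int read_reg(void)   { return shadow_reg; }
static int write_reg(int v) { shadow_reg = v; return 0; }

/* Read-modify-write done entirely under the lock, with every error path
 * funnelled through the same unlock label. */
static int set_mode(int val)
{
	int ret = 0;
	int config;

	pthread_mutex_lock(&update_lock);
	config = read_reg();
	if (config < 0) {
		ret = config;
		goto unlock;
	}
	switch (val) {
	case 0: config &= ~0x01; break;
	case 1: config |=  0x01; break;
	default:
		ret = -1;
		goto unlock;
	}
	if (write_reg(config))
		ret = -1;
unlock:
	pthread_mutex_unlock(&update_lock);
	return ret;
}

int main(void)
{
	printf("set_mode(1) = %d, reg = 0x%02x\n", set_mode(1), shadow_reg);
	return 0;
}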
+diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
+index 4ae3fff13f44..bea0a344fab5 100644
+--- a/drivers/hwmon/dme1737.c
++++ b/drivers/hwmon/dme1737.c
+@@ -247,8 +247,8 @@ struct dme1737_data {
+ u8 pwm_acz[3];
+ u8 pwm_freq[6];
+ u8 pwm_rr[2];
+- u8 zone_low[3];
+- u8 zone_abs[3];
++ s8 zone_low[3];
++ s8 zone_abs[3];
+ u8 zone_hyst[2];
+ u32 alarms;
+ };
+@@ -277,7 +277,7 @@ static inline int IN_FROM_REG(int reg, int nominal, int res)
+ return (reg * nominal + (3 << (res - 3))) / (3 << (res - 2));
+ }
+
+-static inline int IN_TO_REG(int val, int nominal)
++static inline int IN_TO_REG(long val, int nominal)
+ {
+ return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
+ }
+@@ -293,7 +293,7 @@ static inline int TEMP_FROM_REG(int reg, int res)
+ return (reg * 1000) >> (res - 8);
+ }
+
+-static inline int TEMP_TO_REG(int val)
++static inline int TEMP_TO_REG(long val)
+ {
+ return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
+ }
+@@ -308,7 +308,7 @@ static inline int TEMP_RANGE_FROM_REG(int reg)
+ return TEMP_RANGE[(reg >> 4) & 0x0f];
+ }
+
+-static int TEMP_RANGE_TO_REG(int val, int reg)
++static int TEMP_RANGE_TO_REG(long val, int reg)
+ {
+ int i;
+
+@@ -331,7 +331,7 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix)
+ return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000;
+ }
+
+-static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
++static inline int TEMP_HYST_TO_REG(long val, int ix, int reg)
+ {
+ int hyst = clamp_val((val + 500) / 1000, 0, 15);
+
+@@ -347,7 +347,7 @@ static inline int FAN_FROM_REG(int reg, int tpc)
+ return (reg == 0 || reg == 0xffff) ? 0 : 90000 * 60 / reg;
+ }
+
+-static inline int FAN_TO_REG(int val, int tpc)
++static inline int FAN_TO_REG(long val, int tpc)
+ {
+ if (tpc) {
+ return clamp_val(val / tpc, 0, 0xffff);
+@@ -379,7 +379,7 @@ static inline int FAN_TYPE_FROM_REG(int reg)
+ return (edge > 0) ? 1 << (edge - 1) : 0;
+ }
+
+-static inline int FAN_TYPE_TO_REG(int val, int reg)
++static inline int FAN_TYPE_TO_REG(long val, int reg)
+ {
+ int edge = (val == 4) ? 3 : val;
+
+@@ -402,7 +402,7 @@ static int FAN_MAX_FROM_REG(int reg)
+ return 1000 + i * 500;
+ }
+
+-static int FAN_MAX_TO_REG(int val)
++static int FAN_MAX_TO_REG(long val)
+ {
+ int i;
+
+@@ -460,7 +460,7 @@ static inline int PWM_ACZ_FROM_REG(int reg)
+ return acz[(reg >> 5) & 0x07];
+ }
+
+-static inline int PWM_ACZ_TO_REG(int val, int reg)
++static inline int PWM_ACZ_TO_REG(long val, int reg)
+ {
+ int acz = (val == 4) ? 2 : val - 1;
+
+@@ -476,7 +476,7 @@ static inline int PWM_FREQ_FROM_REG(int reg)
+ return PWM_FREQ[reg & 0x0f];
+ }
+
+-static int PWM_FREQ_TO_REG(int val, int reg)
++static int PWM_FREQ_TO_REG(long val, int reg)
+ {
+ int i;
+
+@@ -510,7 +510,7 @@ static inline int PWM_RR_FROM_REG(int reg, int ix)
+ return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0;
+ }
+
+-static int PWM_RR_TO_REG(int val, int ix, int reg)
++static int PWM_RR_TO_REG(long val, int ix, int reg)
+ {
+ int i;
+
+@@ -528,7 +528,7 @@ static inline int PWM_RR_EN_FROM_REG(int reg, int ix)
+ return PWM_RR_FROM_REG(reg, ix) ? 1 : 0;
+ }
+
+-static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg)
++static inline int PWM_RR_EN_TO_REG(long val, int ix, int reg)
+ {
+ int en = (ix == 1) ? 0x80 : 0x08;
+
+@@ -1481,13 +1481,16 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct dme1737_data *data = dev_get_drvdata(dev);
+- long val;
++ unsigned long val;
+ int err;
+
+- err = kstrtol(buf, 10, &val);
++ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
++ if (val > 255)
++ return -EINVAL;
++
+ data->vrm = val;
+ return count;
+ }
+diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
+index 2566c43dd1e9..d10aa7b46cca 100644
+--- a/drivers/hwmon/gpio-fan.c
++++ b/drivers/hwmon/gpio-fan.c
+@@ -173,7 +173,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
+ return -ENODEV;
+ }
+
+-static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm)
++static int rpm_to_speed_index(struct gpio_fan_data *fan_data, unsigned long rpm)
+ {
+ struct gpio_fan_speed *speed = fan_data->speed;
+ int i;
+diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
+index 9efadfc851bc..c1eb464f0fd0 100644
+--- a/drivers/hwmon/lm78.c
++++ b/drivers/hwmon/lm78.c
+@@ -108,7 +108,7 @@ static inline int FAN_FROM_REG(u8 val, int div)
+ * TEMP: mC (-128C to +127C)
+ * REG: 1C/bit, two's complement
+ */
+-static inline s8 TEMP_TO_REG(int val)
++static inline s8 TEMP_TO_REG(long val)
+ {
+ int nval = clamp_val(val, -128000, 127000) ;
+ return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000;
+diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
+index b0129a54e1a6..ef627ea71cc8 100644
+--- a/drivers/hwmon/lm85.c
++++ b/drivers/hwmon/lm85.c
+@@ -155,7 +155,7 @@ static inline u16 FAN_TO_REG(unsigned long val)
+
+ /* Temperature is reported in .001 degC increments */
+ #define TEMP_TO_REG(val) \
+- clamp_val(SCALE(val, 1000, 1), -127, 127)
++ DIV_ROUND_CLOSEST(clamp_val((val), -127000, 127000), 1000)
+ #define TEMPEXT_FROM_REG(val, ext) \
+ SCALE(((val) << 4) + (ext), 16, 1000)
+ #define TEMP_FROM_REG(val) ((val) * 1000)
+@@ -189,7 +189,7 @@ static const int lm85_range_map[] = {
+ 13300, 16000, 20000, 26600, 32000, 40000, 53300, 80000
+ };
+
+-static int RANGE_TO_REG(int range)
++static int RANGE_TO_REG(long range)
+ {
+ int i;
+
+@@ -211,7 +211,7 @@ static const int adm1027_freq_map[8] = { /* 1 Hz */
+ 11, 15, 22, 29, 35, 44, 59, 88
+ };
+
+-static int FREQ_TO_REG(const int *map, int freq)
++static int FREQ_TO_REG(const int *map, unsigned long freq)
+ {
+ int i;
+
+@@ -460,6 +460,9 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
+ if (err)
+ return err;
+
++ if (val > 255)
++ return -EINVAL;
++
+ data->vrm = val;
+ return count;
+ }
+diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
+index d2060e245ff5..cfaf70b9cba7 100644
+--- a/drivers/hwmon/lm92.c
++++ b/drivers/hwmon/lm92.c
+@@ -74,12 +74,9 @@ static inline int TEMP_FROM_REG(s16 reg)
+ return reg / 8 * 625 / 10;
+ }
+
+-static inline s16 TEMP_TO_REG(int val)
++static inline s16 TEMP_TO_REG(long val)
+ {
+- if (val <= -60000)
+- return -60000 * 10 / 625 * 8;
+- if (val >= 160000)
+- return 160000 * 10 / 625 * 8;
++ val = clamp_val(val, -60000, 160000);
+ return val * 10 / 625 * 8;
+ }
+
+@@ -206,10 +203,12 @@ static ssize_t set_temp_hyst(struct device *dev,
+ if (err)
+ return err;
+
++ val = clamp_val(val, -120000, 220000);
+ mutex_lock(&data->update_lock);
+- data->temp[t_hyst] = TEMP_FROM_REG(data->temp[attr->index]) - val;
++ data->temp[t_hyst] =
++ TEMP_TO_REG(TEMP_FROM_REG(data->temp[attr->index]) - val);
+ i2c_smbus_write_word_swapped(client, LM92_REG_TEMP_HYST,
+- TEMP_TO_REG(data->temp[t_hyst]));
++ data->temp[t_hyst]);
+ mutex_unlock(&data->update_lock);
+ return count;
+ }
+diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
+index 3532026e25da..bf1d7893d51c 100644
+--- a/drivers/hwmon/sis5595.c
++++ b/drivers/hwmon/sis5595.c
+@@ -159,7 +159,7 @@ static inline int TEMP_FROM_REG(s8 val)
+ {
+ return val * 830 + 52120;
+ }
+-static inline s8 TEMP_TO_REG(int val)
++static inline s8 TEMP_TO_REG(long val)
+ {
+ int nval = clamp_val(val, -54120, 157530) ;
+ return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830;
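The hwmon changes above (dme1737, gpio-fan, lm78, lm85, lm92, sis5595) consistently widen the *_TO_REG helpers from int to long: the sysfs store handlers parse user input with kstrtol/kstrtoul into a long, and narrowing that value to int before the clamp lets out-of-range writes wrap instead of saturating. A small standalone comparison, loosely modelled on the lm78 TEMP_TO_REG shown above; it assumes a 64-bit long (as on the targets where the truncation is visible) and a made-up sysfs input value.

#include <stdio.h>

/* Clamp after an int narrowing: large inputs may already have wrapped. */
static int temp_to_reg_int(int val)
{
	if (val < -128000) val = -128000;
	if (val >  127000) val =  127000;
	return val < 0 ? (val - 500) / 1000 : (val + 500) / 1000;
}

/* Clamp on the full long value, as the patched helpers do. */
static int temp_to_reg_long(long val)
{
	if (val < -128000) val = -128000;
	if (val >  127000) val =  127000;
	return val < 0 ? (val - 500) / 1000 : (val + 500) / 1000;
}

int main(void)
{
	/* hypothetical "echo 4294837296 > temp1_max"; as an int this
	 * typically wraps to -130000 on two's-complement targets */
	long user = 4294837296L;

	printf("int  helper: %d\n", temp_to_reg_int((int)user));
	printf("long helper: %d\n", temp_to_reg_long(user));
	return 0;
}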
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index e95f9ba96790..83c989382be9 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -210,7 +210,7 @@ static void at91_twi_write_data_dma_callback(void *data)
+ struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
+
+ dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+- dev->buf_len, DMA_MEM_TO_DEV);
++ dev->buf_len, DMA_TO_DEVICE);
+
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+ }
+@@ -289,7 +289,7 @@ static void at91_twi_read_data_dma_callback(void *data)
+ struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
+
+ dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+- dev->buf_len, DMA_DEV_TO_MEM);
++ dev->buf_len, DMA_FROM_DEVICE);
+
+ /* The last two bytes have to be read without using dma */
+ dev->buf += dev->buf_len - 2;
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index a9791509966a..69e11853e8bf 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -399,7 +399,7 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
+ }
+
+ /* is there anything left to handle? */
+- if (unlikely(ipd == 0))
++ if (unlikely((ipd & REG_INT_ALL) == 0))
+ goto out;
+
+ switch (i2c->state) {
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index 59d20c599b16..2da05c0e113d 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -459,7 +459,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
+ {
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+- int rets, err;
++ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+@@ -491,6 +491,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
+ cl_err(dev, cl, "failed to disconnect.\n");
+ goto free;
+ }
++ cl->timer_count = MEI_CONNECT_TIMEOUT;
+ mdelay(10); /* Wait for hardware disconnection ready */
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+ } else {
+@@ -500,23 +501,18 @@ int mei_cl_disconnect(struct mei_cl *cl)
+ }
+ mutex_unlock(&dev->device_lock);
+
+- err = wait_event_timeout(dev->wait_recvd_msg,
++ wait_event_timeout(dev->wait_recvd_msg,
+ MEI_FILE_DISCONNECTED == cl->state,
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+
+ mutex_lock(&dev->device_lock);
++
+ if (MEI_FILE_DISCONNECTED == cl->state) {
+ rets = 0;
+ cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
+ } else {
+- rets = -ENODEV;
+- if (MEI_FILE_DISCONNECTED != cl->state)
+- cl_err(dev, cl, "wrong status client disconnect.\n");
+-
+- if (err)
+- cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err);
+-
+- cl_err(dev, cl, "failed to disconnect from FW client.\n");
++ cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
++ rets = -ETIME;
+ }
+
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+@@ -605,6 +601,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+ } else {
++ cl->state = MEI_FILE_INITIALIZING;
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
+
+@@ -616,6 +613,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
+ mutex_lock(&dev->device_lock);
+
+ if (cl->state != MEI_FILE_CONNECTED) {
++ cl->state = MEI_FILE_DISCONNECTED;
+ /* something went really wrong */
+ if (!cl->status)
+ cl->status = -EFAULT;
+diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
+index 3095fc514a65..5ccc23bc7690 100644
+--- a/drivers/misc/mei/nfc.c
++++ b/drivers/misc/mei/nfc.c
+@@ -342,9 +342,10 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+ ndev = (struct mei_nfc_dev *) cldev->priv_data;
+ dev = ndev->cl->dev;
+
++ err = -ENOMEM;
+ mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
+ if (!mei_buf)
+- return -ENOMEM;
++ goto out;
+
+ hdr = (struct mei_nfc_hci_hdr *) mei_buf;
+ hdr->cmd = MEI_NFC_CMD_HCI_SEND;
+@@ -354,12 +355,9 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+ hdr->data_size = length;
+
+ memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
+-
+ err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
+ if (err < 0)
+- return err;
+-
+- kfree(mei_buf);
++ goto out;
+
+ if (!wait_event_interruptible_timeout(ndev->send_wq,
+ ndev->recv_req_id == ndev->req_id, HZ)) {
+@@ -368,7 +366,8 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+ } else {
+ ndev->req_id++;
+ }
+-
++out:
++ kfree(mei_buf);
+ return err;
+ }
+
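The mei_nfc_send() change above stops leaking mei_buf on the __mei_cl_send() error path by routing every exit through a single out: label that frees the buffer. Below is a hedged userspace sketch of the same structure; send_hw(), the 4-byte header and the buffer sizes are invented for illustration and do not reflect the MEI API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for __mei_cl_send(): pretend everything is sent. */
static int send_hw(const unsigned char *buf, size_t len)
{
	return (int)len;
}

/* Single-exit cleanup: the buffer is freed on every path, including the
 * early error returns that used to leak it. */
static int nfc_send(const unsigned char *payload, size_t length)
{
	unsigned char *buf;
	int err = -1;			/* assume failure until proven otherwise */

	buf = calloc(1, length + 4);	/* 4-byte hypothetical header */
	if (!buf)
		goto out;
	memcpy(buf + 4, payload, length);
	err = send_hw(buf, length + 4);
	if (err < 0)
		goto out;
	err = 0;
out:
	free(buf);			/* free(NULL) is a no-op, so this is safe */
	return err;
}

int main(void)
{
	unsigned char data[] = { 1, 2, 3 };

	printf("nfc_send() = %d\n", nfc_send(data, sizeof(data)));
	return 0;
}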
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 1b46c64a649f..4b821b4360e1 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -369,7 +369,7 @@ static int mei_me_pm_runtime_idle(struct device *device)
+ if (!dev)
+ return -ENODEV;
+ if (mei_write_is_idle(dev))
+- pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2);
++ pm_runtime_autosuspend(device);
+
+ return -EBUSY;
+ }
+diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
+index 2343c6236df9..32fef4d5b0b6 100644
+--- a/drivers/misc/mei/pci-txe.c
++++ b/drivers/misc/mei/pci-txe.c
+@@ -306,7 +306,7 @@ static int mei_txe_pm_runtime_idle(struct device *device)
+ if (!dev)
+ return -ENODEV;
+ if (mei_write_is_idle(dev))
+- pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2);
++ pm_runtime_autosuspend(device);
+
+ return -EBUSY;
+ }
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index 7ad463e9741c..249ab80cbb45 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -834,6 +834,10 @@ static void
+ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
+ unsigned int status)
+ {
++ /* Make sure we have data to handle */
++ if (!data)
++ return;
++
+ /* First check for errors */
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+ MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+@@ -902,9 +906,17 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
+ unsigned int status)
+ {
+ void __iomem *base = host->base;
+- bool sbc = (cmd == host->mrq->sbc);
+- bool busy_resp = host->variant->busy_detect &&
+- (cmd->flags & MMC_RSP_BUSY);
++ bool sbc, busy_resp;
++
++ if (!cmd)
++ return;
++
++ sbc = (cmd == host->mrq->sbc);
++ busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY);
++
++ if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
++ MCI_CMDSENT|MCI_CMDRESPEND)))
++ return;
+
+ /* Check if we need to wait for busy completion. */
+ if (host->busy_status && (status & MCI_ST_CARDBUSY))
+@@ -1132,9 +1144,6 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
+ spin_lock(&host->lock);
+
+ do {
+- struct mmc_command *cmd;
+- struct mmc_data *data;
+-
+ status = readl(host->base + MMCISTATUS);
+
+ if (host->singleirq) {
+@@ -1154,16 +1163,8 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
+
+ dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+
+- cmd = host->cmd;
+- if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
+- MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
+- mmci_cmd_irq(host, cmd, status);
+-
+- data = host->data;
+- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+- MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
+- MCI_DATABLOCKEND) && data)
+- mmci_data_irq(host, data, status);
++ mmci_cmd_irq(host, host->cmd, status);
++ mmci_data_irq(host, host->data, status);
+
+ /* Don't poll for busy completion in irq context. */
+ if (host->busy_status)
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 42914e04d110..056841651a80 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -794,7 +794,7 @@ struct controller *pcie_init(struct pcie_device *dev)
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+- PCI_EXP_SLTSTA_CC);
++ PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
+
+ /* Disable software notification */
+ pcie_disable_notification(ctrl);
+diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
+index a3fbe2012ea3..2ab1b47c7651 100644
+--- a/drivers/pci/pci-label.c
++++ b/drivers/pci/pci-label.c
+@@ -161,8 +161,8 @@ enum acpi_attr_enum {
+ static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
+ {
+ int len;
+- len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer,
+- obj->string.length,
++ len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
++ obj->buffer.length,
+ UTF16_LITTLE_ENDIAN,
+ buf, PAGE_SIZE);
+ buf[len] = '\n';
+@@ -187,16 +187,22 @@ static int dsm_get_label(struct device *dev, char *buf,
+ tmp = obj->package.elements;
+ if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 &&
+ tmp[0].type == ACPI_TYPE_INTEGER &&
+- tmp[1].type == ACPI_TYPE_STRING) {
++ (tmp[1].type == ACPI_TYPE_STRING ||
++ tmp[1].type == ACPI_TYPE_BUFFER)) {
+ /*
+ * The second string element is optional even when
+ * this _DSM is implemented; when not implemented,
+ * this entry must return a null string.
+ */
+- if (attr == ACPI_ATTR_INDEX_SHOW)
++ if (attr == ACPI_ATTR_INDEX_SHOW) {
+ scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
+- else if (attr == ACPI_ATTR_LABEL_SHOW)
+- dsm_label_utf16s_to_utf8s(tmp + 1, buf);
++ } else if (attr == ACPI_ATTR_LABEL_SHOW) {
++ if (tmp[1].type == ACPI_TYPE_STRING)
++ scnprintf(buf, PAGE_SIZE, "%s\n",
++ tmp[1].string.pointer);
++ else if (tmp[1].type == ACPI_TYPE_BUFFER)
++ dsm_label_utf16s_to_utf8s(tmp + 1, buf);
++ }
+ len = strlen(buf) > 0 ? strlen(buf) : -1;
+ }
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 1c8592b0e146..81d49d3ab221 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -839,12 +839,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+
+ if (!__pci_complete_power_transition(dev, state))
+ error = 0;
+- /*
+- * When aspm_policy is "powersave" this call ensures
+- * that ASPM is configured.
+- */
+- if (!error && dev->bus->self)
+- pcie_aspm_powersave_config_link(dev->bus->self);
+
+ return error;
+ }
+@@ -1195,12 +1189,18 @@ int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
+ static int do_pci_enable_device(struct pci_dev *dev, int bars)
+ {
+ int err;
++ struct pci_dev *bridge;
+ u16 cmd;
+ u8 pin;
+
+ err = pci_set_power_state(dev, PCI_D0);
+ if (err < 0 && err != -EIO)
+ return err;
++
++ bridge = pci_upstream_bridge(dev);
++ if (bridge)
++ pcie_aspm_powersave_config_link(bridge);
++
+ err = pcibios_enable_device(dev, bars);
+ if (err < 0)
+ return err;
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index caed1ce6facd..481c4e18693a 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -320,9 +320,11 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
+ resource_size_t min_align)
+ {
+ struct resource *res = dev->resource + resno;
++ unsigned long flags;
+ resource_size_t new_size;
+ int ret;
+
++ flags = res->flags;
+ res->flags |= IORESOURCE_UNSET;
+ if (!res->parent) {
+ dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR\n",
+@@ -339,7 +341,12 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
+ dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
++ } else {
++ res->flags = flags;
++ dev_info(&dev->dev, "BAR %d: %pR (failed to expand by %#llx)\n",
++ resno, res, (unsigned long long) addsize);
+ }
++
+ return ret;
+ }
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 31184b35370f..489e83b6b5e1 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -5092,7 +5092,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+ }
+ if (ioc->Request.Type.Direction & XFER_WRITE) {
+ if (copy_from_user(buff[sg_used], data_ptr, sz)) {
+- status = -ENOMEM;
++ status = -EFAULT;
+ goto cleanup1;
+ }
+ } else
+@@ -6365,9 +6365,9 @@ static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
+ {
+ u32 driver_support;
+
+-#ifdef CONFIG_X86
+- /* Need to enable prefetch in the SCSI core for 6400 in x86 */
+ driver_support = readl(&(h->cfgtable->driver_support));
++ /* Need to enable prefetch in the SCSI core for 6400 in x86 */
++#ifdef CONFIG_X86
+ driver_support |= ENABLE_SCSI_PREFETCH;
+ #endif
+ driver_support |= ENABLE_UNIT_ATTN;
+diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
+index 08356b6955a4..2d36eac6889c 100644
+--- a/drivers/staging/et131x/et131x.c
++++ b/drivers/staging/et131x/et131x.c
+@@ -1423,22 +1423,16 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
+ * @reg: the register to read
+ * @value: 16-bit value to write
+ */
+-static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
++static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
++ u16 value)
+ {
+ struct mac_regs __iomem *mac = &adapter->regs->mac;
+- struct phy_device *phydev = adapter->phydev;
+ int status = 0;
+- u8 addr;
+ u32 delay = 0;
+ u32 mii_addr;
+ u32 mii_cmd;
+ u32 mii_indicator;
+
+- if (!phydev)
+- return -EIO;
+-
+- addr = phydev->addr;
+-
+ /* Save a local copy of the registers we are dealing with so we can
+ * set them back
+ */
+@@ -1633,17 +1627,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
+ struct net_device *netdev = bus->priv;
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+
+- return et131x_mii_write(adapter, reg, value);
+-}
+-
+-static int et131x_mdio_reset(struct mii_bus *bus)
+-{
+- struct net_device *netdev = bus->priv;
+- struct et131x_adapter *adapter = netdev_priv(netdev);
+-
+- et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
+-
+- return 0;
++ return et131x_mii_write(adapter, phy_addr, reg, value);
+ }
+
+ /* et1310_phy_power_switch - PHY power control
+@@ -1658,18 +1642,20 @@ static int et131x_mdio_reset(struct mii_bus *bus)
+ static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
+ {
+ u16 data;
++ struct phy_device *phydev = adapter->phydev;
+
+ et131x_mii_read(adapter, MII_BMCR, &data);
+ data &= ~BMCR_PDOWN;
+ if (down)
+ data |= BMCR_PDOWN;
+- et131x_mii_write(adapter, MII_BMCR, data);
++ et131x_mii_write(adapter, phydev->addr, MII_BMCR, data);
+ }
+
+ /* et131x_xcvr_init - Init the phy if we are setting it into force mode */
+ static void et131x_xcvr_init(struct et131x_adapter *adapter)
+ {
+ u16 lcr2;
++ struct phy_device *phydev = adapter->phydev;
+
+ /* Set the LED behavior such that LED 1 indicates speed (off =
+ * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
+@@ -1690,7 +1676,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter)
+ else
+ lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
+
+- et131x_mii_write(adapter, PHY_LED_2, lcr2);
++ et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2);
+ }
+ }
+
+@@ -3645,14 +3631,14 @@ static void et131x_adjust_link(struct net_device *netdev)
+
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+ &register18);
+- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+- register18 | 0x4);
+- et131x_mii_write(adapter, PHY_INDEX_REG,
++ et131x_mii_write(adapter, phydev->addr,
++ PHY_MPHY_CONTROL_REG, register18 | 0x4);
++ et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG,
+ register18 | 0x8402);
+- et131x_mii_write(adapter, PHY_DATA_REG,
++ et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG,
+ register18 | 511);
+- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+- register18);
++ et131x_mii_write(adapter, phydev->addr,
++ PHY_MPHY_CONTROL_REG, register18);
+ }
+
+ et1310_config_flow_control(adapter);
+@@ -3664,7 +3650,8 @@ static void et131x_adjust_link(struct net_device *netdev)
+ et131x_mii_read(adapter, PHY_CONFIG, &reg);
+ reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
+ reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
+- et131x_mii_write(adapter, PHY_CONFIG, reg);
++ et131x_mii_write(adapter, phydev->addr, PHY_CONFIG,
++ reg);
+ }
+
+ et131x_set_rx_dma_timer(adapter);
+@@ -3677,14 +3664,14 @@ static void et131x_adjust_link(struct net_device *netdev)
+
+ et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
+ &register18);
+- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+- register18 | 0x4);
+- et131x_mii_write(adapter, PHY_INDEX_REG,
+- register18 | 0x8402);
+- et131x_mii_write(adapter, PHY_DATA_REG,
+- register18 | 511);
+- et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
+- register18);
++ et131x_mii_write(adapter, phydev->addr,
++ PHY_MPHY_CONTROL_REG, register18 | 0x4);
++ et131x_mii_write(adapter, phydev->addr,
++ PHY_INDEX_REG, register18 | 0x8402);
++ et131x_mii_write(adapter, phydev->addr,
++ PHY_DATA_REG, register18 | 511);
++ et131x_mii_write(adapter, phydev->addr,
++ PHY_MPHY_CONTROL_REG, register18);
+ }
+
+ /* Free the packets being actively sent & stopped */
+@@ -4646,10 +4633,6 @@ static int et131x_pci_setup(struct pci_dev *pdev,
+ /* Copy address into the net_device struct */
+ memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
+
+- /* Init variable for counting how long we do not have link status */
+- adapter->boot_coma = 0;
+- et1310_disable_phy_coma(adapter);
+-
+ rc = -ENOMEM;
+
+ /* Setup the mii_bus struct */
+@@ -4665,7 +4648,6 @@ static int et131x_pci_setup(struct pci_dev *pdev,
+ adapter->mii_bus->priv = netdev;
+ adapter->mii_bus->read = et131x_mdio_read;
+ adapter->mii_bus->write = et131x_mdio_write;
+- adapter->mii_bus->reset = et131x_mdio_reset;
+ adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
+ GFP_KERNEL);
+ if (!adapter->mii_bus->irq)
+@@ -4689,6 +4671,10 @@ static int et131x_pci_setup(struct pci_dev *pdev,
+ /* Setup et1310 as per the documentation */
+ et131x_adapter_setup(adapter);
+
++ /* Init variable for counting how long we do not have link status */
++ adapter->boot_coma = 0;
++ et1310_disable_phy_coma(adapter);
++
+ /* We can enable interrupts now
+ *
+ * NOTE - Because registration of interrupt handler is done in the
+diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
+index dde04b767a6d..b16687625c44 100644
+--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
++++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
+@@ -35,7 +35,7 @@
+ */
+
+ #define DEBUG_SUBSYSTEM S_CLASS
+-# include <asm/atomic.h>
++# include <linux/atomic.h>
+
+ #include <obd_support.h>
+ #include <obd_class.h>
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index 7526b989dcbf..c4273cd5f7ed 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -54,9 +54,11 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
+ /*=== Customer ID ===*/
+ /****** 8188EUS ********/
++ {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */
+ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
++ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ {} /* Terminating entry */
+ };
+
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index fbf6c5ad222f..ef2fb367d179 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -243,6 +243,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
+ /*
+ * Turn off DTR and RTS early.
+ */
++ if (uart_console(uport) && tty)
++ uport->cons->cflag = tty->termios.c_cflag;
++
+ if (!tty || (tty->termios.c_cflag & HUPCL))
+ uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 257876ea03a1..0b59731c3021 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1509,7 +1509,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
+ if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
+ u |= URB_ISO_ASAP;
+- if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
++ if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+ u |= URB_SHORT_NOT_OK;
+ if (uurb->flags & USBDEVFS_URB_NO_FSBR)
+ u |= URB_NO_FSBR;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 0e950ad8cb25..27f217107ef1 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1728,8 +1728,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ * - Change autosuspend delay of hub can avoid unnecessary auto
+ * suspend timer for hub, also may decrease power consumption
+ * of USB bus.
++ *
++ * - If user has indicated to prevent autosuspend by passing
++ * usbcore.autosuspend = -1 then keep autosuspend disabled.
+ */
+- pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
++#ifdef CONFIG_PM_RUNTIME
++ if (hdev->dev.power.autosuspend_delay >= 0)
++ pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
++#endif
+
+ /*
+ * Hubs have proper suspend/resume support, except for root hubs
+@@ -3264,6 +3270,43 @@ static int finish_port_resume(struct usb_device *udev)
+ }
+
+ /*
++ * There are some SS USB devices which take longer time for link training.
++ * XHCI specs 4.19.4 says that when Link training is successful, port
++ * sets CSC bit to 1. So if SW reads port status before successful link
++ * training, then it will not find device to be present.
++ * USB Analyzer log with such buggy devices show that in some cases
++ * device switch on the RX termination after long delay of host enabling
++ * the VBUS. In few other cases it has been seen that device fails to
++ * negotiate link training in first attempt. It has been
++ * reported till now that few devices take as long as 2000 ms to train
++ * the link after host enabling its VBUS and termination. Following
++ * routine implements a 2000 ms timeout for link training. If in a case
++ * link trains before timeout, loop will exit earlier.
++ *
++ * FIXME: If a device was connected before suspend, but was removed
++ * while system was asleep, then the loop in the following routine will
++ * only exit at timeout.
++ *
++ * This routine should only be called when persist is enabled for a SS
++ * device.
++ */
++static int wait_for_ss_port_enable(struct usb_device *udev,
++ struct usb_hub *hub, int *port1,
++ u16 *portchange, u16 *portstatus)
++{
++ int status = 0, delay_ms = 0;
++
++ while (delay_ms < 2000) {
++ if (status || *portstatus & USB_PORT_STAT_CONNECTION)
++ break;
++ msleep(20);
++ delay_ms += 20;
++ status = hub_port_status(hub, *port1, portstatus, portchange);
++ }
++ return status;
++}
++
++/*
+ * usb_port_resume - re-activate a suspended usb device's upstream port
+ * @udev: device to re-activate, not a root hub
+ * Context: must be able to sleep; device not locked; pm locks held
+@@ -3359,6 +3402,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ }
+ }
+
++ if (udev->persist_enabled && hub_is_superspeed(hub->hdev))
++ status = wait_for_ss_port_enable(udev, hub, &port1, &portchange,
++ &portstatus);
++
+ status = check_port_resume_type(udev,
+ hub, port1, status, portchange, portstatus);
+ if (status == 0)
+@@ -4550,6 +4597,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+ struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
+ struct usb_port *port_dev = hub->ports[port1 - 1];
+ struct usb_device *udev = port_dev->child;
++ static int unreliable_port = -1;
+
+ /* Disconnect any existing devices under this port */
+ if (udev) {
+@@ -4570,10 +4618,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+ USB_PORT_STAT_C_ENABLE)) {
+ status = hub_port_debounce_be_stable(hub, port1);
+ if (status < 0) {
+- if (status != -ENODEV && printk_ratelimit())
+- dev_err(&port_dev->dev,
+- "connect-debounce failed\n");
++ if (status != -ENODEV &&
++ port1 != unreliable_port &&
++ printk_ratelimit())
++ dev_err(&port_dev->dev, "connect-debounce failed\n");
+ portstatus &= ~USB_PORT_STAT_CONNECTION;
++ unreliable_port = port1;
+ } else {
+ portstatus = status;
+ }
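wait_for_ss_port_enable() above is a bounded polling loop: re-read the port status every 20 ms, stop as soon as a connection is reported, and give up after 2000 ms. A trivial standalone model of that loop shape follows; port_connected() is a stand-in for hub_port_status() and simply pretends link training completes after about 100 ms.

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

/* Hypothetical status source: the link "trains" after ~100 ms. */
static bool port_connected(int elapsed_ms)
{
	return elapsed_ms >= 100;
}

int main(void)
{
	int delay_ms = 0;
	bool connected = false;

	while (delay_ms < 2000) {
		if (connected)
			break;			/* exit early once the port is up */
		usleep(20 * 1000);		/* poll every 20 ms */
		delay_ms += 20;
		connected = port_connected(delay_ms);
	}
	printf("connected=%d after %d ms\n", connected, delay_ms);
	return 0;
}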
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index cc305c71ac3d..6130b7574908 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -1230,7 +1230,7 @@ int ehci_hub_control(
+ if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ retval = ehset_single_step_set_feature(hcd,
+- wIndex);
++ wIndex + 1);
+ spin_lock_irqsave(&ehci->lock, flags);
+ break;
+ }
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index 3e86bf4371b3..ca7b964124af 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -35,6 +35,21 @@ static const char hcd_name[] = "ehci-pci";
+ #define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
+
+ /*-------------------------------------------------------------------------*/
++#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939
++static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
++{
++ return pdev->vendor == PCI_VENDOR_ID_INTEL &&
++ pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC;
++}
++
++/*
++ * 0x84 is the offset of in/out threshold register,
++ * and it is the same offset as the register of 'hostpc'.
++ */
++#define intel_quark_x1000_insnreg01 hostpc
++
++/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */
++#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD 0x007f007f
+
+ /* called after powerup, by probe or system-pm "wakeup" */
+ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
+@@ -50,6 +65,16 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
+ if (!retval)
+ ehci_dbg(ehci, "MWI active\n");
+
++ /* Reset the threshold limit */
++ if (is_intel_quark_x1000(pdev)) {
++ /*
++ * For the Intel QUARK X1000, raise the I/O threshold to the
++ * maximum usable value in order to improve performance.
++ */
++ ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD,
++ ehci->regs->intel_quark_x1000_insnreg01);
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
+index 45032e933e18..04f2186939d2 100644
+--- a/drivers/usb/host/ohci-dbg.c
++++ b/drivers/usb/host/ohci-dbg.c
+@@ -236,7 +236,7 @@ ohci_dump_roothub (
+ }
+ }
+
+-static void ohci_dump (struct ohci_hcd *controller, int verbose)
++static void ohci_dump(struct ohci_hcd *controller)
+ {
+ ohci_dbg (controller, "OHCI controller state\n");
+
+@@ -464,15 +464,16 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
+ static ssize_t fill_async_buffer(struct debug_buffer *buf)
+ {
+ struct ohci_hcd *ohci;
+- size_t temp;
++ size_t temp, size;
+ unsigned long flags;
+
+ ohci = buf->ohci;
++ size = PAGE_SIZE;
+
+ /* display control and bulk lists together, for simplicity */
+ spin_lock_irqsave (&ohci->lock, flags);
+- temp = show_list(ohci, buf->page, buf->count, ohci->ed_controltail);
+- temp += show_list(ohci, buf->page + temp, buf->count - temp,
++ temp = show_list(ohci, buf->page, size, ohci->ed_controltail);
++ temp += show_list(ohci, buf->page + temp, size - temp,
+ ohci->ed_bulktail);
+ spin_unlock_irqrestore (&ohci->lock, flags);
+
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index f98d03f3144c..a21a36500fd7 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -76,8 +76,8 @@ static const char hcd_name [] = "ohci_hcd";
+ #include "ohci.h"
+ #include "pci-quirks.h"
+
+-static void ohci_dump (struct ohci_hcd *ohci, int verbose);
+-static void ohci_stop (struct usb_hcd *hcd);
++static void ohci_dump(struct ohci_hcd *ohci);
++static void ohci_stop(struct usb_hcd *hcd);
+
+ #include "ohci-hub.c"
+ #include "ohci-dbg.c"
+@@ -744,7 +744,7 @@ retry:
+ ohci->ed_to_check = NULL;
+ }
+
+- ohci_dump (ohci, 1);
++ ohci_dump(ohci);
+
+ return 0;
+ }
+@@ -825,7 +825,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+ usb_hc_died(hcd);
+ }
+
+- ohci_dump (ohci, 1);
++ ohci_dump(ohci);
+ ohci_usb_reset (ohci);
+ }
+
+@@ -925,7 +925,7 @@ static void ohci_stop (struct usb_hcd *hcd)
+ {
+ struct ohci_hcd *ohci = hcd_to_ohci (hcd);
+
+- ohci_dump (ohci, 1);
++ ohci_dump(ohci);
+
+ if (quirk_nec(ohci))
+ flush_work(&ohci->nec_work);
+diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
+index d4253e319428..a8bde5b8cbdd 100644
+--- a/drivers/usb/host/ohci-q.c
++++ b/drivers/usb/host/ohci-q.c
+@@ -311,8 +311,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
+ * - ED_OPER: when there's any request queued, the ED gets rescheduled
+ * immediately. HC should be working on them.
+ *
+- * - ED_IDLE: when there's no TD queue. there's no reason for the HC
+- * to care about this ED; safe to disable the endpoint.
++ * - ED_IDLE: when there's no TD queue or the HC isn't running.
+ *
+ * When finish_unlinks() runs later, after SOF interrupt, it will often
+ * complete one or more URB unlinks before making that state change.
+@@ -926,6 +925,10 @@ rescan_all:
+ int completed, modified;
+ __hc32 *prev;
+
++ /* Is this ED already invisible to the hardware? */
++ if (ed->state == ED_IDLE)
++ goto ed_idle;
++
+ /* only take off EDs that the HC isn't using, accounting for
+ * frame counter wraps and EDs with partially retired TDs
+ */
+@@ -955,12 +958,20 @@ skip_ed:
+ }
+ }
+
++ /* ED's now officially unlinked, hc doesn't see */
++ ed->state = ED_IDLE;
++ if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
++ ohci->eds_scheduled--;
++ ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
++ ed->hwNextED = 0;
++ wmb();
++ ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
++ed_idle:
++
+ /* reentrancy: if we drop the schedule lock, someone might
+ * have modified this list. normally it's just prepending
+ * entries (which we'd ignore), but paranoia won't hurt.
+ */
+- *last = ed->ed_next;
+- ed->ed_next = NULL;
+ modified = 0;
+
+ /* unlink urbs as requested, but rescan the list after
+@@ -1018,19 +1029,20 @@ rescan_this:
+ if (completed && !list_empty (&ed->td_list))
+ goto rescan_this;
+
+- /* ED's now officially unlinked, hc doesn't see */
+- ed->state = ED_IDLE;
+- if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
+- ohci->eds_scheduled--;
+- ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
+- ed->hwNextED = 0;
+- wmb ();
+- ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);
+-
+- /* but if there's work queued, reschedule */
+- if (!list_empty (&ed->td_list)) {
+- if (ohci->rh_state == OHCI_RH_RUNNING)
+- ed_schedule (ohci, ed);
++ /*
++ * If no TDs are queued, take ED off the ed_rm_list.
++ * Otherwise, if the HC is running, reschedule.
++ * If not, leave it on the list for further dequeues.
++ */
++ if (list_empty(&ed->td_list)) {
++ *last = ed->ed_next;
++ ed->ed_next = NULL;
++ } else if (ohci->rh_state == OHCI_RH_RUNNING) {
++ *last = ed->ed_next;
++ ed->ed_next = NULL;
++ ed_schedule(ohci, ed);
++ } else {
++ last = &ed->ed_next;
+ }
+
+ if (modified)
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index e20520f42753..994a36e582ca 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -101,6 +101,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ /* AMD PLL quirk */
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+ xhci->quirks |= XHCI_AMD_PLL_FIX;
++
++ if (pdev->vendor == PCI_VENDOR_ID_AMD)
++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
++
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_INTEL_HOST;
+@@ -143,6 +147,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
++ xhci->quirks |= XHCI_BROKEN_STREAMS;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0015)
+@@ -150,6 +155,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ if (pdev->vendor == PCI_VENDOR_ID_VIA)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+
++ /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
++ if (pdev->vendor == PCI_VENDOR_ID_VIA &&
++ pdev->device == 0x3432)
++ xhci->quirks |= XHCI_BROKEN_STREAMS;
++
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Resetting on resume");
+@@ -230,7 +240,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ goto put_usb3_hcd;
+ /* Roothub already marked as USB 3.0 speed */
+
+- if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
++ if (!(xhci->quirks & XHCI_BROKEN_STREAMS) &&
++ HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
+ /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 749fc68eb5c1..28a929d45cfe 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -364,32 +364,6 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+ }
+ }
+
+-/*
+- * Find the segment that trb is in. Start searching in start_seg.
+- * If we must move past a segment that has a link TRB with a toggle cycle state
+- * bit set, then we will toggle the value pointed at by cycle_state.
+- */
+-static struct xhci_segment *find_trb_seg(
+- struct xhci_segment *start_seg,
+- union xhci_trb *trb, int *cycle_state)
+-{
+- struct xhci_segment *cur_seg = start_seg;
+- struct xhci_generic_trb *generic_trb;
+-
+- while (cur_seg->trbs > trb ||
+- &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
+- generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
+- if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
+- *cycle_state ^= 0x1;
+- cur_seg = cur_seg->next;
+- if (cur_seg == start_seg)
+- /* Looped over the entire list. Oops! */
+- return NULL;
+- }
+- return cur_seg;
+-}
+-
+-
+ static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id)
+@@ -459,9 +433,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_virt_ep *ep = &dev->eps[ep_index];
+ struct xhci_ring *ep_ring;
+- struct xhci_generic_trb *trb;
++ struct xhci_segment *new_seg;
++ union xhci_trb *new_deq;
+ dma_addr_t addr;
+ u64 hw_dequeue;
++ bool cycle_found = false;
++ bool td_last_trb_found = false;
+
+ ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+ ep_index, stream_id);
+@@ -486,45 +463,45 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ hw_dequeue = le64_to_cpu(ep_ctx->deq);
+ }
+
+- /* Find virtual address and segment of hardware dequeue pointer */
+- state->new_deq_seg = ep_ring->deq_seg;
+- state->new_deq_ptr = ep_ring->dequeue;
+- while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+- != (dma_addr_t)(hw_dequeue & ~0xf)) {
+- next_trb(xhci, ep_ring, &state->new_deq_seg,
+- &state->new_deq_ptr);
+- if (state->new_deq_ptr == ep_ring->dequeue) {
+- WARN_ON(1);
+- return;
+- }
+- }
++ new_seg = ep_ring->deq_seg;
++ new_deq = ep_ring->dequeue;
++ state->new_cycle_state = hw_dequeue & 0x1;
++
+ /*
+- * Find cycle state for last_trb, starting at old cycle state of
+- * hw_dequeue. If there is only one segment ring, find_trb_seg() will
+- * return immediately and cannot toggle the cycle state if this search
+- * wraps around, so add one more toggle manually in that case.
++ * We want to find the pointer, segment and cycle state of the new trb
++ * (the one after current TD's last_trb). We know the cycle state at
++ * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
++ * found.
+ */
+- state->new_cycle_state = hw_dequeue & 0x1;
+- if (ep_ring->first_seg == ep_ring->first_seg->next &&
+- cur_td->last_trb < state->new_deq_ptr)
+- state->new_cycle_state ^= 0x1;
++ do {
++ if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
++ == (dma_addr_t)(hw_dequeue & ~0xf)) {
++ cycle_found = true;
++ if (td_last_trb_found)
++ break;
++ }
++ if (new_deq == cur_td->last_trb)
++ td_last_trb_found = true;
+
+- state->new_deq_ptr = cur_td->last_trb;
+- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+- "Finding segment containing last TRB in TD.");
+- state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+- state->new_deq_ptr, &state->new_cycle_state);
+- if (!state->new_deq_seg) {
+- WARN_ON(1);
+- return;
+- }
++ if (cycle_found &&
++ TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
++ new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
++ state->new_cycle_state ^= 0x1;
++
++ next_trb(xhci, ep_ring, &new_seg, &new_deq);
++
++ /* Search wrapped around, bail out */
++ if (new_deq == ep->ring->dequeue) {
++ xhci_err(xhci, "Error: Failed finding new dequeue state\n");
++ state->new_deq_seg = NULL;
++ state->new_deq_ptr = NULL;
++ return;
++ }
++
++ } while (!cycle_found || !td_last_trb_found);
+
+- /* Increment to find next TRB after last_trb. Cycle if appropriate. */
+- trb = &state->new_deq_ptr->generic;
+- if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+- (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
+- state->new_cycle_state ^= 0x1;
+- next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
++ state->new_deq_seg = new_seg;
++ state->new_deq_ptr = new_deq;
+
+ /* Don't update the ring cycle state for the producer (us). */
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+@@ -2483,7 +2460,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ * last TRB of the previous TD. The command completion handle
+ * will take care the rest.
+ */
+- if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
++ if (!event_seg && (trb_comp_code == COMP_STOP ||
++ trb_comp_code == COMP_STOP_INVAL)) {
+ ret = 0;
+ goto cleanup;
+ }
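The xhci_find_new_dequeue_state() rewrite above replaces the removed find_trb_seg() with a single walk that starts at the software dequeue pointer, latches the cycle state once the hardware dequeue position is reached, marks when the TD's last TRB has been passed, toggles the cycle state at link TRBs carrying LINK_TOGGLE, and bails out if the search wraps all the way around. A toy model of that control flow over a ring of indices (slot 7 plays the link TRB); the concrete values are arbitrary and only meant to show the shape of the loop.

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8

int main(void)
{
	int start = 5;		/* software dequeue (where the walk begins) */
	int hw_dequeue = 6;	/* where the controller actually stopped */
	int last_trb = 2;	/* last TRB of the cancelled TD */
	bool cycle = true;	/* cycle state read back at hw_dequeue */
	bool cycle_found = false, last_found = false;
	int i = start;

	do {
		if (!cycle_found && i == hw_dequeue) {
			cycle_found = true;
			if (last_found)
				break;
		}
		if (i == last_trb)
			last_found = true;
		if (cycle_found && i == RING_SIZE - 1)
			cycle = !cycle;		/* link TRB with LINK_TOGGLE */
		i = (i + 1) % RING_SIZE;
		if (i == start) {		/* wrapped around: give up */
			printf("failed to find new dequeue state\n");
			return 1;
		}
	} while (!cycle_found || !last_found);

	/* i now points just past last_trb, with the cycle state the
	 * hardware will expect at that slot. */
	printf("new dequeue index %d, cycle state %d\n", i, cycle);
	return 0;
}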
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 7436d5f5e67a..e32cc6cf86dc 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -2891,6 +2891,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+ ep_index, ep->stopped_stream, ep->stopped_td,
+ &deq_state);
+
++ if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
++ return;
++
+ /* HW with the reset endpoint quirk will use the saved dequeue state to
+ * issue a configure endpoint command later.
+ */
+@@ -3163,7 +3166,8 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ num_streams);
+
+ /* MaxPSASize value 0 (2 streams) means streams are not supported */
+- if (HCC_MAX_PSA(xhci->hcc_params) < 4) {
++ if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
++ HCC_MAX_PSA(xhci->hcc_params) < 4) {
+ xhci_dbg(xhci, "xHCI controller does not support streams.\n");
+ return -ENOSYS;
+ }
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 9ffecd56600d..dace5152e179 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1558,6 +1558,8 @@ struct xhci_hcd {
+ #define XHCI_PLAT (1 << 16)
+ #define XHCI_SLOW_SUSPEND (1 << 17)
+ #define XHCI_SPURIOUS_WAKEUP (1 << 18)
++/* For controllers with a broken beyond repair streams implementation */
++#define XHCI_BROKEN_STREAMS (1 << 19)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 8a3813be1b28..8b0f517abb6b 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -151,6 +151,7 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
+@@ -673,6 +674,8 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
++ { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
++ { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+ { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
+@@ -945,6 +948,8 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
++ /* ekey Devices */
++ { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
+ /* Infineon Devices */
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ { } /* Terminating entry */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index c4777bc6aee0..70b0b1d88ae9 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -42,6 +42,8 @@
+ /* www.candapter.com Ewert Energy Systems CANdapter device */
+ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+
++#define FTDI_BM_ATOM_NANO_PID 0xa559 /* Basic Micro ATOM Nano USB2Serial */
++
+ /*
+ * Texas Instruments XDS100v2 JTAG / BeagleBone A3
+ * http://processors.wiki.ti.com/index.php/XDS100
+@@ -140,12 +142,15 @@
+ /*
+ * Xsens Technologies BV products (http://www.xsens.com).
+ */
+-#define XSENS_CONVERTER_0_PID 0xD388
+-#define XSENS_CONVERTER_1_PID 0xD389
++#define XSENS_VID 0x2639
++#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
++#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
++#define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */
++#define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */
+ #define XSENS_CONVERTER_2_PID 0xD38A
+-#define XSENS_CONVERTER_3_PID 0xD38B
+-#define XSENS_CONVERTER_4_PID 0xD38C
+-#define XSENS_CONVERTER_5_PID 0xD38D
++#define XSENS_CONVERTER_3_PID 0xD38B /* Xsens USB-serial converter */
++#define XSENS_CONVERTER_4_PID 0xD38C /* Xsens Wireless Receiver */
++#define XSENS_CONVERTER_5_PID 0xD38D /* Xsens Awinda Station */
+ #define XSENS_CONVERTER_6_PID 0xD38E
+ #define XSENS_CONVERTER_7_PID 0xD38F
+
+@@ -1375,3 +1380,8 @@
+ #define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
+ #define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
+ #define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
++
++/*
++ * ekey biometric systems GmbH (http://ekey.net/)
++ */
++#define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index e62f2dff8b7d..6c3734d2b45a 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -514,6 +514,10 @@ static void command_port_read_callback(struct urb *urb)
+ dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
+ return;
+ }
++ if (!urb->actual_length) {
++ dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__);
++ return;
++ }
+ if (status) {
+ dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
+ if (status != -ENOENT)
+@@ -534,7 +538,8 @@ static void command_port_read_callback(struct urb *urb)
+ /* These are unsolicited reports from the firmware, hence no
+ waiting command to wakeup */
+ dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
+- } else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
++ } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) &&
++ (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) {
+ memcpy(command_info->result_buffer, &data[1],
+ urb->actual_length - 1);
+ command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
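The whiteheat.c hunk adds two guards around the command-response copy: bail out on a zero-length URB, and only memcpy when the payload actually fits the fixed result buffer. A minimal userspace sketch of that defensive pattern, assuming an illustrative buffer size; the names below are not the driver's:

    #include <stdio.h>
    #include <string.h>

    #define RESULT_BUFFER_SIZE 64   /* illustrative size, not the driver's */

    /* data[0] is a command byte, the rest is payload; copy the payload into
     * a fixed result buffer only after the same checks the patch adds. */
    static int store_response(unsigned char *result, const unsigned char *data,
                              size_t actual_length)
    {
        if (actual_length == 0)
            return -1;                          /* empty response: nothing to copy */
        if (actual_length - 1 > RESULT_BUFFER_SIZE)
            return -1;                          /* payload would overflow the buffer */
        memcpy(result, data + 1, actual_length - 1);
        return 0;
    }

    int main(void)
    {
        unsigned char result[RESULT_BUFFER_SIZE];
        unsigned char resp[] = { 0x0a, 1, 2, 3 };

        printf("%d\n", store_response(result, resp, sizeof(resp))); /* 0  */
        printf("%d\n", store_response(result, resp, 0));            /* -1 */
        return 0;
    }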
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 511b22953167..3f42785f653c 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -1026,7 +1026,7 @@ static int uas_configure_endpoints(struct uas_dev_info *devinfo)
+ usb_endpoint_num(&eps[3]->desc));
+
+ if (udev->speed != USB_SPEED_SUPER) {
+- devinfo->qdepth = 256;
++ devinfo->qdepth = 32;
+ devinfo->use_streams = 0;
+ } else {
+ devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1,
+diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
+index 84b4bfb84344..500713882ad5 100644
+--- a/drivers/xen/events/events_fifo.c
++++ b/drivers/xen/events/events_fifo.c
+@@ -67,10 +67,9 @@ static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
+ static unsigned event_array_pages __read_mostly;
+
+ /*
+- * sync_set_bit() and friends must be unsigned long aligned on non-x86
+- * platforms.
++ * sync_set_bit() and friends must be unsigned long aligned.
+ */
+-#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
++#if BITS_PER_LONG > 32
+
+ #define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
+ #define EVTCHN_FIFO_BIT(b, w) \
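The events_fifo.c change drops the x86 exemption: sync_set_bit() and friends operate on unsigned long words, so on any 64-bit build the 32-bit event-word pointer is first rounded down to an 8-byte boundary, which is what BM() does. A small sketch of that masking, with illustrative types:

    #include <stdint.h>
    #include <stdio.h>

    /* Round a pointer down to an 8-byte boundary, as BM() does, so that
     * long-sized atomic bit operations never straddle an unaligned address. */
    static unsigned long *align_to_long(uint32_t *w)
    {
        return (unsigned long *)((uintptr_t)w & ~(uintptr_t)0x7);
    }

    int main(void)
    {
        uint32_t words[4];
        printf("%p -> %p\n", (void *)&words[1], (void *)align_to_long(&words[1]));
        return 0;
    }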
+diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
+index 5a201d81049c..fbd76ded9a34 100644
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -22,7 +22,6 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/freezer.h>
+-#include <linux/workqueue.h>
+ #include "async-thread.h"
+ #include "ctree.h"
+
+@@ -55,8 +54,39 @@ struct btrfs_workqueue {
+ struct __btrfs_workqueue *high;
+ };
+
+-static inline struct __btrfs_workqueue
+-*__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
++static void normal_work_helper(struct btrfs_work *work);
++
++#define BTRFS_WORK_HELPER(name) \
++void btrfs_##name(struct work_struct *arg) \
++{ \
++ struct btrfs_work *work = container_of(arg, struct btrfs_work, \
++ normal_work); \
++ normal_work_helper(work); \
++}
++
++BTRFS_WORK_HELPER(worker_helper);
++BTRFS_WORK_HELPER(delalloc_helper);
++BTRFS_WORK_HELPER(flush_delalloc_helper);
++BTRFS_WORK_HELPER(cache_helper);
++BTRFS_WORK_HELPER(submit_helper);
++BTRFS_WORK_HELPER(fixup_helper);
++BTRFS_WORK_HELPER(endio_helper);
++BTRFS_WORK_HELPER(endio_meta_helper);
++BTRFS_WORK_HELPER(endio_meta_write_helper);
++BTRFS_WORK_HELPER(endio_raid56_helper);
++BTRFS_WORK_HELPER(rmw_helper);
++BTRFS_WORK_HELPER(endio_write_helper);
++BTRFS_WORK_HELPER(freespace_write_helper);
++BTRFS_WORK_HELPER(delayed_meta_helper);
++BTRFS_WORK_HELPER(readahead_helper);
++BTRFS_WORK_HELPER(qgroup_rescan_helper);
++BTRFS_WORK_HELPER(extent_refs_helper);
++BTRFS_WORK_HELPER(scrub_helper);
++BTRFS_WORK_HELPER(scrubwrc_helper);
++BTRFS_WORK_HELPER(scrubnc_helper);
++
++static struct __btrfs_workqueue *
++__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
+ int thresh)
+ {
+ struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+@@ -232,13 +262,11 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
+ spin_unlock_irqrestore(lock, flags);
+ }
+
+-static void normal_work_helper(struct work_struct *arg)
++static void normal_work_helper(struct btrfs_work *work)
+ {
+- struct btrfs_work *work;
+ struct __btrfs_workqueue *wq;
+ int need_order = 0;
+
+- work = container_of(arg, struct btrfs_work, normal_work);
+ /*
+ * We should not touch things inside work in the following cases:
+ * 1) after work->func() if it has no ordered_free
+@@ -262,7 +290,7 @@ static void normal_work_helper(struct work_struct *arg)
+ trace_btrfs_all_work_done(work);
+ }
+
+-void btrfs_init_work(struct btrfs_work *work,
++void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
+ btrfs_func_t func,
+ btrfs_func_t ordered_func,
+ btrfs_func_t ordered_free)
+@@ -270,7 +298,7 @@ void btrfs_init_work(struct btrfs_work *work,
+ work->func = func;
+ work->ordered_func = ordered_func;
+ work->ordered_free = ordered_free;
+- INIT_WORK(&work->normal_work, normal_work_helper);
++ INIT_WORK(&work->normal_work, uniq_func);
+ INIT_LIST_HEAD(&work->ordered_list);
+ work->flags = 0;
+ }
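The BTRFS_WORK_HELPER() macro above stamps out one thin, uniquely named wrapper per work type, so every queued item carries its own work function instead of the shared normal_work_helper(); the real wrappers recover the btrfs_work via container_of(). A stripped-down userspace sketch of the macro pattern, with an invented struct and dispatcher purely for illustration:

    #include <stdio.h>

    struct work {
        const char *name;
    };

    /* Shared dispatcher, analogous to normal_work_helper(). */
    static void dispatch(struct work *w)
    {
        printf("running %s\n", w->name);
    }

    /* Generate a uniquely named wrapper that forwards to the dispatcher,
     * mirroring what BTRFS_WORK_HELPER() does for each work type. */
    #define WORK_HELPER(name)               \
    void name##_helper(struct work *w)      \
    {                                       \
        dispatch(w);                        \
    }

    WORK_HELPER(endio)
    WORK_HELPER(delalloc)

    int main(void)
    {
        struct work a = { "endio" }, b = { "delalloc" };
        endio_helper(&a);      /* distinct function addresses, same dispatcher */
        delalloc_helper(&b);
        return 0;
    }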
+diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
+index 9c6b66d15fb0..e9e31c94758f 100644
+--- a/fs/btrfs/async-thread.h
++++ b/fs/btrfs/async-thread.h
+@@ -19,12 +19,14 @@
+
+ #ifndef __BTRFS_ASYNC_THREAD_
+ #define __BTRFS_ASYNC_THREAD_
++#include <linux/workqueue.h>
+
+ struct btrfs_workqueue;
+ /* Internal use only */
+ struct __btrfs_workqueue;
+ struct btrfs_work;
+ typedef void (*btrfs_func_t)(struct btrfs_work *arg);
++typedef void (*btrfs_work_func_t)(struct work_struct *arg);
+
+ struct btrfs_work {
+ btrfs_func_t func;
+@@ -38,11 +40,35 @@ struct btrfs_work {
+ unsigned long flags;
+ };
+
++#define BTRFS_WORK_HELPER_PROTO(name) \
++void btrfs_##name(struct work_struct *arg)
++
++BTRFS_WORK_HELPER_PROTO(worker_helper);
++BTRFS_WORK_HELPER_PROTO(delalloc_helper);
++BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper);
++BTRFS_WORK_HELPER_PROTO(cache_helper);
++BTRFS_WORK_HELPER_PROTO(submit_helper);
++BTRFS_WORK_HELPER_PROTO(fixup_helper);
++BTRFS_WORK_HELPER_PROTO(endio_helper);
++BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
++BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
++BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
++BTRFS_WORK_HELPER_PROTO(rmw_helper);
++BTRFS_WORK_HELPER_PROTO(endio_write_helper);
++BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
++BTRFS_WORK_HELPER_PROTO(delayed_meta_helper);
++BTRFS_WORK_HELPER_PROTO(readahead_helper);
++BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper);
++BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
++BTRFS_WORK_HELPER_PROTO(scrub_helper);
++BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
++BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
++
+ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
+ int flags,
+ int max_active,
+ int thresh);
+-void btrfs_init_work(struct btrfs_work *work,
++void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
+ btrfs_func_t func,
+ btrfs_func_t ordered_func,
+ btrfs_func_t ordered_free);
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index e25564bfcb46..54a201dac7f9 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -276,9 +276,8 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ }
+ if (ret > 0)
+ goto next;
+- ret = ulist_add_merge(parents, eb->start,
+- (uintptr_t)eie,
+- (u64 *)&old, GFP_NOFS);
++ ret = ulist_add_merge_ptr(parents, eb->start,
++ eie, (void **)&old, GFP_NOFS);
+ if (ret < 0)
+ break;
+ if (!ret && extent_item_pos) {
+@@ -1001,16 +1000,19 @@ again:
+ ret = -EIO;
+ goto out;
+ }
++ btrfs_tree_read_lock(eb);
++ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ ret = find_extent_in_eb(eb, bytenr,
+ *extent_item_pos, &eie);
++ btrfs_tree_read_unlock_blocking(eb);
+ free_extent_buffer(eb);
+ if (ret < 0)
+ goto out;
+ ref->inode_list = eie;
+ }
+- ret = ulist_add_merge(refs, ref->parent,
+- (uintptr_t)ref->inode_list,
+- (u64 *)&eie, GFP_NOFS);
++ ret = ulist_add_merge_ptr(refs, ref->parent,
++ ref->inode_list,
++ (void **)&eie, GFP_NOFS);
+ if (ret < 0)
+ goto out;
+ if (!ret && extent_item_pos) {
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index 4794923c410c..43527fd78825 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -84,12 +84,6 @@ struct btrfs_inode {
+ */
+ struct list_head delalloc_inodes;
+
+- /*
+- * list for tracking inodes that must be sent to disk before a
+- * rename or truncate commit
+- */
+- struct list_head ordered_operations;
+-
+ /* node for the red-black tree that links inodes in subvolume root */
+ struct rb_node rb_node;
+
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index da775bfdebc9..a2e90f855d7d 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1395,8 +1395,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
+ return -ENOMEM;
+
+ async_work->delayed_root = delayed_root;
+- btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root,
+- NULL, NULL);
++ btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
++ btrfs_async_run_delayed_root, NULL, NULL);
+ async_work->nr = nr;
+
+ btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 08e65e9cf2aa..0229c3720b30 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -39,7 +39,6 @@
+ #include "btrfs_inode.h"
+ #include "volumes.h"
+ #include "print-tree.h"
+-#include "async-thread.h"
+ #include "locking.h"
+ #include "tree-log.h"
+ #include "free-space-cache.h"
+@@ -60,8 +59,6 @@ static void end_workqueue_fn(struct btrfs_work *work);
+ static void free_fs_root(struct btrfs_root *root);
+ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
+ int read_only);
+-static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+- struct btrfs_root *root);
+ static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
+ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ struct btrfs_root *root);
+@@ -695,35 +692,41 @@ static void end_workqueue_bio(struct bio *bio, int err)
+ {
+ struct end_io_wq *end_io_wq = bio->bi_private;
+ struct btrfs_fs_info *fs_info;
++ struct btrfs_workqueue *wq;
++ btrfs_work_func_t func;
+
+ fs_info = end_io_wq->info;
+ end_io_wq->error = err;
+- btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
+
+ if (bio->bi_rw & REQ_WRITE) {
+- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
+- btrfs_queue_work(fs_info->endio_meta_write_workers,
+- &end_io_wq->work);
+- else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
+- btrfs_queue_work(fs_info->endio_freespace_worker,
+- &end_io_wq->work);
+- else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+- btrfs_queue_work(fs_info->endio_raid56_workers,
+- &end_io_wq->work);
+- else
+- btrfs_queue_work(fs_info->endio_write_workers,
+- &end_io_wq->work);
++ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
++ wq = fs_info->endio_meta_write_workers;
++ func = btrfs_endio_meta_write_helper;
++ } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
++ wq = fs_info->endio_freespace_worker;
++ func = btrfs_freespace_write_helper;
++ } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
++ wq = fs_info->endio_raid56_workers;
++ func = btrfs_endio_raid56_helper;
++ } else {
++ wq = fs_info->endio_write_workers;
++ func = btrfs_endio_write_helper;
++ }
+ } else {
+- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+- btrfs_queue_work(fs_info->endio_raid56_workers,
+- &end_io_wq->work);
+- else if (end_io_wq->metadata)
+- btrfs_queue_work(fs_info->endio_meta_workers,
+- &end_io_wq->work);
+- else
+- btrfs_queue_work(fs_info->endio_workers,
+- &end_io_wq->work);
++ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
++ wq = fs_info->endio_raid56_workers;
++ func = btrfs_endio_raid56_helper;
++ } else if (end_io_wq->metadata) {
++ wq = fs_info->endio_meta_workers;
++ func = btrfs_endio_meta_helper;
++ } else {
++ wq = fs_info->endio_workers;
++ func = btrfs_endio_helper;
++ }
+ }
++
++ btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
++ btrfs_queue_work(wq, &end_io_wq->work);
+ }
+
+ /*
+@@ -830,7 +833,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
+ async->submit_bio_start = submit_bio_start;
+ async->submit_bio_done = submit_bio_done;
+
+- btrfs_init_work(&async->work, run_one_async_start,
++ btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
+ run_one_async_done, run_one_async_free);
+
+ async->bio_flags = bio_flags;
+@@ -3829,34 +3832,6 @@ static void btrfs_error_commit_super(struct btrfs_root *root)
+ btrfs_cleanup_transaction(root);
+ }
+
+-static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+- struct btrfs_root *root)
+-{
+- struct btrfs_inode *btrfs_inode;
+- struct list_head splice;
+-
+- INIT_LIST_HEAD(&splice);
+-
+- mutex_lock(&root->fs_info->ordered_operations_mutex);
+- spin_lock(&root->fs_info->ordered_root_lock);
+-
+- list_splice_init(&t->ordered_operations, &splice);
+- while (!list_empty(&splice)) {
+- btrfs_inode = list_entry(splice.next, struct btrfs_inode,
+- ordered_operations);
+-
+- list_del_init(&btrfs_inode->ordered_operations);
+- spin_unlock(&root->fs_info->ordered_root_lock);
+-
+- btrfs_invalidate_inodes(btrfs_inode->root);
+-
+- spin_lock(&root->fs_info->ordered_root_lock);
+- }
+-
+- spin_unlock(&root->fs_info->ordered_root_lock);
+- mutex_unlock(&root->fs_info->ordered_operations_mutex);
+-}
+-
+ static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
+ {
+ struct btrfs_ordered_extent *ordered;
+@@ -4093,8 +4068,6 @@ again:
+ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ struct btrfs_root *root)
+ {
+- btrfs_destroy_ordered_operations(cur_trans, root);
+-
+ btrfs_destroy_delayed_refs(cur_trans, root);
+
+ cur_trans->state = TRANS_STATE_COMMIT_START;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 813537f362f9..8edb9fcc38d5 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -552,7 +552,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
+ caching_ctl->block_group = cache;
+ caching_ctl->progress = cache->key.objectid;
+ atomic_set(&caching_ctl->count, 1);
+- btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
++ btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
++ caching_thread, NULL, NULL);
+
+ spin_lock(&cache->lock);
+ /*
+@@ -2749,8 +2750,8 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
+ async->sync = 0;
+ init_completion(&async->wait);
+
+- btrfs_init_work(&async->work, delayed_ref_async_start,
+- NULL, NULL);
++ btrfs_init_work(&async->work, btrfs_extent_refs_helper,
++ delayed_ref_async_start, NULL, NULL);
+
+ btrfs_queue_work(root->fs_info->extent_workers, &async->work);
+
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index a389820d158b..09b4e3165e2c 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2532,6 +2532,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
+ test_bit(BIO_UPTODATE, &bio->bi_flags);
+ if (err)
+ uptodate = 0;
++ offset += len;
+ continue;
+ }
+ }
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index f46cfe45d686..54c84daec9b5 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -756,7 +756,7 @@ again:
+ found_next = 1;
+ if (ret != 0)
+ goto insert;
+- slot = 0;
++ slot = path->slots[0];
+ }
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+ if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 1f2b99cb55ea..ab1fd668020d 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1838,6 +1838,8 @@ out:
+
+ int btrfs_release_file(struct inode *inode, struct file *filp)
+ {
++ if (filp->private_data)
++ btrfs_ioctl_trans_end(filp);
+ /*
+ * ordered_data_close is set by setattr when we are about to truncate

+ * a file from a non-zero size to a zero size. This tries to
+@@ -1845,26 +1847,8 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
+ * application were using truncate to replace a file in place.
+ */
+ if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+- &BTRFS_I(inode)->runtime_flags)) {
+- struct btrfs_trans_handle *trans;
+- struct btrfs_root *root = BTRFS_I(inode)->root;
+-
+- /*
+- * We need to block on a committing transaction to keep us from
+- * throwing a ordered operation on to the list and causing
+- * something like sync to deadlock trying to flush out this
+- * inode.
+- */
+- trans = btrfs_start_transaction(root, 0);
+- if (IS_ERR(trans))
+- return PTR_ERR(trans);
+- btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
+- btrfs_end_transaction(trans, root);
+- if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
++ &BTRFS_I(inode)->runtime_flags))
+ filemap_flush(inode->i_mapping);
+- }
+- if (filp->private_data)
+- btrfs_ioctl_trans_end(filp);
+ return 0;
+ }
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 3668048e16f8..c6cd34e699d0 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -709,6 +709,18 @@ retry:
+ unlock_extent(io_tree, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1);
++
++ /*
++ * we need to redirty the pages if we decide to
++ * fallback to uncompressed IO, otherwise we
++ * will not submit these pages down to lower
++ * layers.
++ */
++ extent_range_redirty_for_io(inode,
++ async_extent->start,
++ async_extent->start +
++ async_extent->ram_size - 1);
++
+ goto retry;
+ }
+ goto out_free;
+@@ -1084,8 +1096,10 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
+ async_cow->end = cur_end;
+ INIT_LIST_HEAD(&async_cow->extents);
+
+- btrfs_init_work(&async_cow->work, async_cow_start,
+- async_cow_submit, async_cow_free);
++ btrfs_init_work(&async_cow->work,
++ btrfs_delalloc_helper,
++ async_cow_start, async_cow_submit,
++ async_cow_free);
+
+ nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
+ PAGE_CACHE_SHIFT;
+@@ -1869,7 +1883,8 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
+
+ SetPageChecked(page);
+ page_cache_get(page);
+- btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
++ btrfs_init_work(&fixup->work, btrfs_fixup_helper,
++ btrfs_writepage_fixup_worker, NULL, NULL);
+ fixup->page = page;
+ btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
+ return -EBUSY;
+@@ -2810,7 +2825,8 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
+ struct inode *inode = page->mapping->host;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_ordered_extent *ordered_extent = NULL;
+- struct btrfs_workqueue *workers;
++ struct btrfs_workqueue *wq;
++ btrfs_work_func_t func;
+
+ trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
+
+@@ -2819,13 +2835,17 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
+ end - start + 1, uptodate))
+ return 0;
+
+- btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
++ if (btrfs_is_free_space_inode(inode)) {
++ wq = root->fs_info->endio_freespace_worker;
++ func = btrfs_freespace_write_helper;
++ } else {
++ wq = root->fs_info->endio_write_workers;
++ func = btrfs_endio_write_helper;
++ }
+
+- if (btrfs_is_free_space_inode(inode))
+- workers = root->fs_info->endio_freespace_worker;
+- else
+- workers = root->fs_info->endio_write_workers;
+- btrfs_queue_work(workers, &ordered_extent->work);
++ btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
++ NULL);
++ btrfs_queue_work(wq, &ordered_extent->work);
+
+ return 0;
+ }
+@@ -7146,7 +7166,8 @@ again:
+ if (!ret)
+ goto out_test;
+
+- btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL);
++ btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
++ finish_ordered_fn, NULL, NULL);
+ btrfs_queue_work(root->fs_info->endio_write_workers,
+ &ordered->work);
+ out_test:
+@@ -7939,27 +7960,6 @@ static int btrfs_truncate(struct inode *inode)
+ BUG_ON(ret);
+
+ /*
+- * setattr is responsible for setting the ordered_data_close flag,
+- * but that is only tested during the last file release. That
+- * could happen well after the next commit, leaving a great big
+- * window where new writes may get lost if someone chooses to write
+- * to this file after truncating to zero
+- *
+- * The inode doesn't have any dirty data here, and so if we commit
+- * this is a noop. If someone immediately starts writing to the inode
+- * it is very likely we'll catch some of their writes in this
+- * transaction, and the commit will find this file on the ordered
+- * data list with good things to send down.
+- *
+- * This is a best effort solution, there is still a window where
+- * using truncate to replace the contents of the file will
+- * end up with a zero length file after a crash.
+- */
+- if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
+- &BTRFS_I(inode)->runtime_flags))
+- btrfs_add_ordered_operation(trans, root, inode);
+-
+- /*
+ * So if we truncate and then write and fsync we normally would just
+ * write the extents that changed, which is a problem if we need to
+ * first truncate that entire inode. So set this flag so we write out
+@@ -8106,7 +8106,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ mutex_init(&ei->delalloc_mutex);
+ btrfs_ordered_inode_tree_init(&ei->ordered_tree);
+ INIT_LIST_HEAD(&ei->delalloc_inodes);
+- INIT_LIST_HEAD(&ei->ordered_operations);
+ RB_CLEAR_NODE(&ei->rb_node);
+
+ return inode;
+@@ -8146,17 +8145,6 @@ void btrfs_destroy_inode(struct inode *inode)
+ if (!root)
+ goto free;
+
+- /*
+- * Make sure we're properly removed from the ordered operation
+- * lists.
+- */
+- smp_mb();
+- if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
+- spin_lock(&root->fs_info->ordered_root_lock);
+- list_del_init(&BTRFS_I(inode)->ordered_operations);
+- spin_unlock(&root->fs_info->ordered_root_lock);
+- }
+-
+ if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+ &BTRFS_I(inode)->runtime_flags)) {
+ btrfs_info(root->fs_info, "inode %llu still on the orphan list",
+@@ -8338,12 +8326,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ ret = 0;
+
+ /*
+- * we're using rename to replace one file with another.
+- * and the replacement file is large. Start IO on it now so
+- * we don't add too much work to the end of the transaction
++ * we're using rename to replace one file with another. Start IO on it
++ * now so we don't add too much work to the end of the transaction
+ */
+- if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
+- old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
++ if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
+ filemap_flush(old_inode->i_mapping);
+
+ /* close the racy window with snapshot create/destroy ioctl */
+@@ -8391,12 +8377,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ */
+ btrfs_pin_log_trans(root);
+ }
+- /*
+- * make sure the inode gets flushed if it is replacing
+- * something.
+- */
+- if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
+- btrfs_add_ordered_operation(trans, root, old_inode);
+
+ inode_inc_iversion(old_dir);
+ inode_inc_iversion(new_dir);
+@@ -8514,7 +8494,9 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
+ work->inode = inode;
+ work->wait = wait;
+ work->delay_iput = delay_iput;
+- btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
++ WARN_ON_ONCE(!inode);
++ btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
++ btrfs_run_delalloc_work, NULL, NULL);
+
+ return work;
+ }
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 7187b14faa6c..ac734ec4cc20 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -571,18 +571,6 @@ void btrfs_remove_ordered_extent(struct inode *inode,
+
+ trace_btrfs_ordered_extent_remove(inode, entry);
+
+- /*
+- * we have no more ordered extents for this inode and
+- * no dirty pages. We can safely remove it from the
+- * list of ordered extents
+- */
+- if (RB_EMPTY_ROOT(&tree->tree) &&
+- !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
+- spin_lock(&root->fs_info->ordered_root_lock);
+- list_del_init(&BTRFS_I(inode)->ordered_operations);
+- spin_unlock(&root->fs_info->ordered_root_lock);
+- }
+-
+ if (!root->nr_ordered_extents) {
+ spin_lock(&root->fs_info->ordered_root_lock);
+ BUG_ON(list_empty(&root->ordered_root));
+@@ -627,6 +615,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
+ spin_unlock(&root->ordered_extent_lock);
+
+ btrfs_init_work(&ordered->flush_work,
++ btrfs_flush_delalloc_helper,
+ btrfs_run_ordered_extent_work, NULL, NULL);
+ list_add_tail(&ordered->work_list, &works);
+ btrfs_queue_work(root->fs_info->flush_workers,
+@@ -687,81 +676,6 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
+ }
+
+ /*
+- * this is used during transaction commit to write all the inodes
+- * added to the ordered operation list. These files must be fully on
+- * disk before the transaction commits.
+- *
+- * we have two modes here, one is to just start the IO via filemap_flush
+- * and the other is to wait for all the io. When we wait, we have an
+- * extra check to make sure the ordered operation list really is empty
+- * before we return
+- */
+-int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
+- struct btrfs_root *root, int wait)
+-{
+- struct btrfs_inode *btrfs_inode;
+- struct inode *inode;
+- struct btrfs_transaction *cur_trans = trans->transaction;
+- struct list_head splice;
+- struct list_head works;
+- struct btrfs_delalloc_work *work, *next;
+- int ret = 0;
+-
+- INIT_LIST_HEAD(&splice);
+- INIT_LIST_HEAD(&works);
+-
+- mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
+- spin_lock(&root->fs_info->ordered_root_lock);
+- list_splice_init(&cur_trans->ordered_operations, &splice);
+- while (!list_empty(&splice)) {
+- btrfs_inode = list_entry(splice.next, struct btrfs_inode,
+- ordered_operations);
+- inode = &btrfs_inode->vfs_inode;
+-
+- list_del_init(&btrfs_inode->ordered_operations);
+-
+- /*
+- * the inode may be getting freed (in sys_unlink path).
+- */
+- inode = igrab(inode);
+- if (!inode)
+- continue;
+-
+- if (!wait)
+- list_add_tail(&BTRFS_I(inode)->ordered_operations,
+- &cur_trans->ordered_operations);
+- spin_unlock(&root->fs_info->ordered_root_lock);
+-
+- work = btrfs_alloc_delalloc_work(inode, wait, 1);
+- if (!work) {
+- spin_lock(&root->fs_info->ordered_root_lock);
+- if (list_empty(&BTRFS_I(inode)->ordered_operations))
+- list_add_tail(&btrfs_inode->ordered_operations,
+- &splice);
+- list_splice_tail(&splice,
+- &cur_trans->ordered_operations);
+- spin_unlock(&root->fs_info->ordered_root_lock);
+- ret = -ENOMEM;
+- goto out;
+- }
+- list_add_tail(&work->list, &works);
+- btrfs_queue_work(root->fs_info->flush_workers,
+- &work->work);
+-
+- cond_resched();
+- spin_lock(&root->fs_info->ordered_root_lock);
+- }
+- spin_unlock(&root->fs_info->ordered_root_lock);
+-out:
+- list_for_each_entry_safe(work, next, &works, list) {
+- list_del_init(&work->list);
+- btrfs_wait_and_free_delalloc_work(work);
+- }
+- mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
+- return ret;
+-}
+-
+-/*
+ * Used to start IO or wait for a given ordered extent to finish.
+ *
+ * If wait is one, this effectively waits on page writeback for all the pages
+@@ -1120,42 +1034,6 @@ out:
+ return index;
+ }
+
+-
+-/*
+- * add a given inode to the list of inodes that must be fully on
+- * disk before a transaction commit finishes.
+- *
+- * This basically gives us the ext3 style data=ordered mode, and it is mostly
+- * used to make sure renamed files are fully on disk.
+- *
+- * It is a noop if the inode is already fully on disk.
+- *
+- * If trans is not null, we'll do a friendly check for a transaction that
+- * is already flushing things and force the IO down ourselves.
+- */
+-void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
+- struct btrfs_root *root, struct inode *inode)
+-{
+- struct btrfs_transaction *cur_trans = trans->transaction;
+- u64 last_mod;
+-
+- last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
+-
+- /*
+- * if this file hasn't been changed since the last transaction
+- * commit, we can safely return without doing anything
+- */
+- if (last_mod <= root->fs_info->last_trans_committed)
+- return;
+-
+- spin_lock(&root->fs_info->ordered_root_lock);
+- if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
+- list_add_tail(&BTRFS_I(inode)->ordered_operations,
+- &cur_trans->ordered_operations);
+- }
+- spin_unlock(&root->fs_info->ordered_root_lock);
+-}
+-
+ int __init ordered_data_init(void)
+ {
+ btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
+diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
+index 246897058efb..d81a274d621e 100644
+--- a/fs/btrfs/ordered-data.h
++++ b/fs/btrfs/ordered-data.h
+@@ -190,11 +190,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
+ struct btrfs_ordered_extent *ordered);
+ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
+ u32 *sum, int len);
+-int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
+- struct btrfs_root *root, int wait);
+-void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
+- struct btrfs_root *root,
+- struct inode *inode);
+ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr);
+ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr);
+ void btrfs_get_logged_extents(struct inode *inode,
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 98cb6b2630f9..3eec914710b2 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2551,6 +2551,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
+ memset(&fs_info->qgroup_rescan_work, 0,
+ sizeof(fs_info->qgroup_rescan_work));
+ btrfs_init_work(&fs_info->qgroup_rescan_work,
++ btrfs_qgroup_rescan_helper,
+ btrfs_qgroup_rescan_worker, NULL, NULL);
+
+ if (ret) {
+diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
+index 4a88f073fdd7..0a6b6e4bcbb9 100644
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -1416,7 +1416,8 @@ cleanup:
+
+ static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
+ {
+- btrfs_init_work(&rbio->work, rmw_work, NULL, NULL);
++ btrfs_init_work(&rbio->work, btrfs_rmw_helper,
++ rmw_work, NULL, NULL);
+
+ btrfs_queue_work(rbio->fs_info->rmw_workers,
+ &rbio->work);
+@@ -1424,7 +1425,8 @@ static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
+
+ static void async_read_rebuild(struct btrfs_raid_bio *rbio)
+ {
+- btrfs_init_work(&rbio->work, read_rebuild_work, NULL, NULL);
++ btrfs_init_work(&rbio->work, btrfs_rmw_helper,
++ read_rebuild_work, NULL, NULL);
+
+ btrfs_queue_work(rbio->fs_info->rmw_workers,
+ &rbio->work);
+@@ -1665,7 +1667,8 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
+ plug = container_of(cb, struct btrfs_plug_cb, cb);
+
+ if (from_schedule) {
+- btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
++ btrfs_init_work(&plug->work, btrfs_rmw_helper,
++ unplug_work, NULL, NULL);
+ btrfs_queue_work(plug->info->rmw_workers,
+ &plug->work);
+ return;
+diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
+index 09230cf3a244..20408c6b665a 100644
+--- a/fs/btrfs/reada.c
++++ b/fs/btrfs/reada.c
+@@ -798,7 +798,8 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info)
+ /* FIXME we cannot handle this properly right now */
+ BUG();
+ }
+- btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
++ btrfs_init_work(&rmw->work, btrfs_readahead_helper,
++ reada_start_machine_worker, NULL, NULL);
+ rmw->fs_info = fs_info;
+
+ btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index b6d198f5181e..8dddedcfa961 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -428,8 +428,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
+ sbio->index = i;
+ sbio->sctx = sctx;
+ sbio->page_count = 0;
+- btrfs_init_work(&sbio->work, scrub_bio_end_io_worker,
+- NULL, NULL);
++ btrfs_init_work(&sbio->work, btrfs_scrub_helper,
++ scrub_bio_end_io_worker, NULL, NULL);
+
+ if (i != SCRUB_BIOS_PER_SCTX - 1)
+ sctx->bios[i]->next_free = i + 1;
+@@ -999,8 +999,8 @@ nodatasum_case:
+ fixup_nodatasum->root = fs_info->extent_root;
+ fixup_nodatasum->mirror_num = failed_mirror_index + 1;
+ scrub_pending_trans_workers_inc(sctx);
+- btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum,
+- NULL, NULL);
++ btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
++ scrub_fixup_nodatasum, NULL, NULL);
+ btrfs_queue_work(fs_info->scrub_workers,
+ &fixup_nodatasum->work);
+ goto out;
+@@ -1616,7 +1616,8 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err)
+ sbio->err = err;
+ sbio->bio = bio;
+
+- btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
++ btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
++ scrub_wr_bio_end_io_worker, NULL, NULL);
+ btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
+ }
+
+@@ -3203,7 +3204,8 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
+ nocow_ctx->len = len;
+ nocow_ctx->mirror_num = mirror_num;
+ nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
+- btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL);
++ btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
++ copy_nocow_pages_worker, NULL, NULL);
+ INIT_LIST_HEAD(&nocow_ctx->inodes);
+ btrfs_queue_work(fs_info->scrub_nocow_workers,
+ &nocow_ctx->work);
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 5f379affdf23..d89c6d3542ca 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -218,7 +218,6 @@ loop:
+ spin_lock_init(&cur_trans->delayed_refs.lock);
+
+ INIT_LIST_HEAD(&cur_trans->pending_snapshots);
+- INIT_LIST_HEAD(&cur_trans->ordered_operations);
+ INIT_LIST_HEAD(&cur_trans->pending_chunks);
+ INIT_LIST_HEAD(&cur_trans->switch_commits);
+ list_add_tail(&cur_trans->list, &fs_info->trans_list);
+@@ -1612,27 +1611,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
+ kmem_cache_free(btrfs_trans_handle_cachep, trans);
+ }
+
+-static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
+- struct btrfs_root *root)
+-{
+- int ret;
+-
+- ret = btrfs_run_delayed_items(trans, root);
+- if (ret)
+- return ret;
+-
+- /*
+- * rename don't use btrfs_join_transaction, so, once we
+- * set the transaction to blocked above, we aren't going
+- * to get any new ordered operations. We can safely run
+- * it here and no for sure that nothing new will be added
+- * to the list
+- */
+- ret = btrfs_run_ordered_operations(trans, root, 1);
+-
+- return ret;
+-}
+-
+ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
+ {
+ if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
+@@ -1653,13 +1631,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ struct btrfs_transaction *prev_trans = NULL;
+ int ret;
+
+- ret = btrfs_run_ordered_operations(trans, root, 0);
+- if (ret) {
+- btrfs_abort_transaction(trans, root, ret);
+- btrfs_end_transaction(trans, root);
+- return ret;
+- }
+-
+ /* Stop the commit early if ->aborted is set */
+ if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
+ ret = cur_trans->aborted;
+@@ -1740,7 +1711,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ if (ret)
+ goto cleanup_transaction;
+
+- ret = btrfs_flush_all_pending_stuffs(trans, root);
++ ret = btrfs_run_delayed_items(trans, root);
+ if (ret)
+ goto cleanup_transaction;
+
+@@ -1748,7 +1719,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ extwriter_counter_read(cur_trans) == 0);
+
+ /* some pending stuffs might be added after the previous flush. */
+- ret = btrfs_flush_all_pending_stuffs(trans, root);
++ ret = btrfs_run_delayed_items(trans, root);
+ if (ret)
+ goto cleanup_transaction;
+
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 7dd558ed0716..579be51b27e5 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -55,7 +55,6 @@ struct btrfs_transaction {
+ wait_queue_head_t writer_wait;
+ wait_queue_head_t commit_wait;
+ struct list_head pending_snapshots;
+- struct list_head ordered_operations;
+ struct list_head pending_chunks;
+ struct list_head switch_commits;
+ struct btrfs_delayed_ref_root delayed_refs;
+diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
+index 7f78cbf5cf41..4c29db604bbe 100644
+--- a/fs/btrfs/ulist.h
++++ b/fs/btrfs/ulist.h
+@@ -57,6 +57,21 @@ void ulist_free(struct ulist *ulist);
+ int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
+ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
+ u64 *old_aux, gfp_t gfp_mask);
++
++/* just like ulist_add_merge() but take a pointer for the aux data */
++static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux,
++ void **old_aux, gfp_t gfp_mask)
++{
++#if BITS_PER_LONG == 32
++ u64 old64 = (uintptr_t)*old_aux;
++ int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
++ *old_aux = (void *)((uintptr_t)old64);
++ return ret;
++#else
++ return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
++#endif
++}
++
+ struct ulist_node *ulist_next(struct ulist *ulist,
+ struct ulist_iterator *uiter);
+
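ulist_add_merge_ptr() exists because ulist_add_merge() reports the old aux value through a u64 *: on a 32-bit build, casting a void ** to u64 * lets the callee store 8 bytes into a 4-byte pointer slot and corrupt whatever sits next to it. The safe pattern widens through a local u64, exactly as the inline above does. A small self-contained sketch of the same idea; the merge function here is a stand-in, not the btrfs one:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for ulist_add_merge(): reports the stored aux value
     * through a full 64-bit slot. */
    static int add_merge(uint64_t val, uint64_t aux, uint64_t *old_aux)
    {
        (void)val;
        *old_aux = aux;        /* writes 8 bytes - must point at a real u64 */
        return 0;
    }

    /* Pointer-friendly wrapper: on 32-bit, go through a local u64 so the
     * 8-byte store never lands on a 4-byte pointer variable. */
    static int add_merge_ptr(uint64_t val, void *aux, void **old_aux)
    {
    #if UINTPTR_MAX == 0xffffffffu
        uint64_t old64 = (uintptr_t)*old_aux;
        int ret = add_merge(val, (uintptr_t)aux, &old64);
        *old_aux = (void *)(uintptr_t)old64;
        return ret;
    #else
        return add_merge(val, (uint64_t)(uintptr_t)aux, (uint64_t *)old_aux);
    #endif
    }

    int main(void)
    {
        int x = 42;
        void *old = NULL;
        add_merge_ptr(1, &x, &old);
        printf("old now points at x: %d\n", *(int *)old);
        return 0;
    }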
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 6cb82f62cb7c..81bec9fd8f19 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -5800,7 +5800,8 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
+ else
+ generate_random_uuid(dev->uuid);
+
+- btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL);
++ btrfs_init_work(&dev->work, btrfs_submit_helper,
++ pending_bios_fn, NULL, NULL);
+
+ return dev;
+ }
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 8c41b52da358..16a46b6a6fee 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
+ */
+ void debugfs_remove_recursive(struct dentry *dentry)
+ {
+- struct dentry *child, *next, *parent;
++ struct dentry *child, *parent;
+
+ if (IS_ERR_OR_NULL(dentry))
+ return;
+@@ -546,30 +546,49 @@ void debugfs_remove_recursive(struct dentry *dentry)
+ parent = dentry;
+ down:
+ mutex_lock(&parent->d_inode->i_mutex);
+- list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
++ loop:
++ /*
++ * The parent->d_subdirs is protected by the d_lock. Outside that
++ * lock, the child can be unlinked and set to be freed which can
++ * use the d_u.d_child as the rcu head and corrupt this list.
++ */
++ spin_lock(&parent->d_lock);
++ list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
+ if (!debugfs_positive(child))
+ continue;
+
+ /* perhaps simple_empty(child) makes more sense */
+ if (!list_empty(&child->d_subdirs)) {
++ spin_unlock(&parent->d_lock);
+ mutex_unlock(&parent->d_inode->i_mutex);
+ parent = child;
+ goto down;
+ }
+- up:
++
++ spin_unlock(&parent->d_lock);
++
+ if (!__debugfs_remove(child, parent))
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
++
++ /*
++ * The parent->d_lock protects against the child unlinking
++ * from d_subdirs. When releasing the parent->d_lock we can
++ * no longer trust that the next pointer is valid.
++ * Restart the loop. We'll skip this one with the
++ * debugfs_positive() check.
++ */
++ goto loop;
+ }
++ spin_unlock(&parent->d_lock);
+
+ mutex_unlock(&parent->d_inode->i_mutex);
+ child = parent;
+ parent = parent->d_parent;
+ mutex_lock(&parent->d_inode->i_mutex);
+
+- if (child != dentry) {
+- next = list_next_entry(child, d_u.d_child);
+- goto up;
+- }
++ if (child != dentry)
++ /* go up */
++ goto loop;
+
+ if (!__debugfs_remove(child, parent))
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
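The debugfs_remove_recursive() rework gives up on the "safe" list iterator: once parent->d_lock is dropped to remove a child, the cached next pointer may already be freed, so the walk restarts from the head of d_subdirs and relies on debugfs_positive() to skip entries handled earlier. A generic sketch of that restart-under-lock pattern; the list, lock, and dead flag are illustrative stand-ins:

    #include <pthread.h>
    #include <stdio.h>

    struct node {
        struct node *next;
        int dead;              /* plays the role of !debugfs_positive() */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Tear down every live node. Once the lock is dropped to do the real
     * removal work, n->next may be stale, so the walk restarts from the
     * head and skips nodes already marked dead. */
    static void remove_all(struct node *head)
    {
    restart:
        pthread_mutex_lock(&list_lock);
        for (struct node *n = head; n; n = n->next) {
            if (n->dead)
                continue;      /* handled on an earlier pass */
            n->dead = 1;       /* mark while still holding the lock */
            pthread_mutex_unlock(&list_lock);
            /* ... removal work without the lock goes here ... */
            printf("removed a node\n");
            goto restart;      /* never trust n->next after unlocking */
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        struct node c = { NULL, 0 }, b = { &c, 0 }, a = { &b, 0 };
        remove_all(&a);        /* prints "removed a node" three times */
        return 0;
    }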
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 7cc5a0e23688..1bbe7c315138 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2144,8 +2144,8 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
+ extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
+ extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
+ extern void ext4_ind_truncate(handle_t *, struct inode *inode);
+-extern int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
+- ext4_lblk_t first, ext4_lblk_t stop);
++extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
++ ext4_lblk_t start, ext4_lblk_t end);
+
+ /* ioctl.c */
+ extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
+@@ -2453,6 +2453,22 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
+ up_write(&EXT4_I(inode)->i_data_sem);
+ }
+
++/* Update i_size, i_disksize. Requires i_mutex to avoid races with truncate */
++static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
++{
++ int changed = 0;
++
++ if (newsize > inode->i_size) {
++ i_size_write(inode, newsize);
++ changed = 1;
++ }
++ if (newsize > EXT4_I(inode)->i_disksize) {
++ ext4_update_i_disksize(inode, newsize);
++ changed |= 2;
++ }
++ return changed;
++}
++
+ struct ext4_group_info {
+ unsigned long bb_state;
+ struct rb_root bb_free_root;
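The new ext4_update_inode_size() helper folds the i_size and i_disksize updates into one call and reports what changed as a bitmask: bit 0 for the in-core size, bit 1 for the on-disk size. Callers in the hunks below test bit 0 to decide whether i_mtime should follow. A toy illustration of the same convention, with the fields simplified to plain integers:

    #include <stdio.h>

    struct toy_inode {
        long long i_size;
        long long i_disksize;
    };

    /* Returns a bitmask: 1 if the in-core size grew, 2 if the on-disk size grew. */
    static int update_inode_size(struct toy_inode *inode, long long newsize)
    {
        int changed = 0;

        if (newsize > inode->i_size) {
            inode->i_size = newsize;
            changed |= 1;
        }
        if (newsize > inode->i_disksize) {
            inode->i_disksize = newsize;
            changed |= 2;
        }
        return changed;
    }

    int main(void)
    {
        struct toy_inode ino = { 100, 50 };

        if (update_inode_size(&ino, 80) & 0x1)   /* only i_disksize grows: mask is 2 */
            printf("i_size changed\n");          /* not printed */
        printf("size=%lld disk=%lld\n", ino.i_size, ino.i_disksize);
        return 0;
    }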
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 4da228a0e6d0..7dfd6300e1c2 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4664,7 +4664,8 @@ retry:
+ }
+
+ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
+- ext4_lblk_t len, int flags, int mode)
++ ext4_lblk_t len, loff_t new_size,
++ int flags, int mode)
+ {
+ struct inode *inode = file_inode(file);
+ handle_t *handle;
+@@ -4673,8 +4674,10 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
+ int retries = 0;
+ struct ext4_map_blocks map;
+ unsigned int credits;
++ loff_t epos;
+
+ map.m_lblk = offset;
++ map.m_len = len;
+ /*
+ * Don't normalize the request if it can fit in one extent so
+ * that it doesn't get unnecessarily split into multiple
+@@ -4689,9 +4692,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
+ credits = ext4_chunk_trans_blocks(inode, len);
+
+ retry:
+- while (ret >= 0 && ret < len) {
+- map.m_lblk = map.m_lblk + ret;
+- map.m_len = len = len - ret;
++ while (ret >= 0 && len) {
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+ credits);
+ if (IS_ERR(handle)) {
+@@ -4708,6 +4709,21 @@ retry:
+ ret2 = ext4_journal_stop(handle);
+ break;
+ }
++ map.m_lblk += ret;
++ map.m_len = len = len - ret;
++ epos = (loff_t)map.m_lblk << inode->i_blkbits;
++ inode->i_ctime = ext4_current_time(inode);
++ if (new_size) {
++ if (epos > new_size)
++ epos = new_size;
++ if (ext4_update_inode_size(inode, epos) & 0x1)
++ inode->i_mtime = inode->i_ctime;
++ } else {
++ if (epos > inode->i_size)
++ ext4_set_inode_flag(inode,
++ EXT4_INODE_EOFBLOCKS);
++ }
++ ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_journal_stop(handle);
+ if (ret2)
+ break;
+@@ -4730,7 +4746,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ loff_t new_size = 0;
+ int ret = 0;
+ int flags;
+- int partial;
++ int credits;
++ int partial_begin, partial_end;
+ loff_t start, end;
+ ext4_lblk_t lblk;
+ struct address_space *mapping = inode->i_mapping;
+@@ -4770,7 +4787,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+
+ if (start < offset || end > offset + len)
+ return -EINVAL;
+- partial = (offset + len) & ((1 << blkbits) - 1);
++ partial_begin = offset & ((1 << blkbits) - 1);
++ partial_end = (offset + len) & ((1 << blkbits) - 1);
+
+ lblk = start >> blkbits;
+ max_blocks = (end >> blkbits);
+@@ -4804,7 +4822,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ * If we have a partial block after EOF we have to allocate
+ * the entire block.
+ */
+- if (partial)
++ if (partial_end)
+ max_blocks += 1;
+ }
+
+@@ -4812,6 +4830,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+
+ /* Now release the pages and zero block aligned part of pages*/
+ truncate_pagecache_range(inode, start, end - 1);
++ inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+
+ /* Wait all existing dio workers, newcomers will block on i_mutex */
+ ext4_inode_block_unlocked_dio(inode);
+@@ -4824,13 +4843,22 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ if (ret)
+ goto out_dio;
+
+- ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags,
+- mode);
++ ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
++ flags, mode);
+ if (ret)
+ goto out_dio;
+ }
++ if (!partial_begin && !partial_end)
++ goto out_dio;
+
+- handle = ext4_journal_start(inode, EXT4_HT_MISC, 4);
++ /*
++ * In worst case we have to writeout two nonadjacent unwritten
++ * blocks and update the inode
++ */
++ credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
++ if (ext4_should_journal_data(inode))
++ credits += 2;
++ handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ ext4_std_error(inode->i_sb, ret);
+@@ -4838,12 +4866,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ }
+
+ inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+-
+ if (new_size) {
+- if (new_size > i_size_read(inode))
+- i_size_write(inode, new_size);
+- if (new_size > EXT4_I(inode)->i_disksize)
+- ext4_update_i_disksize(inode, new_size);
++ ext4_update_inode_size(inode, new_size);
+ } else {
+ /*
+ * Mark that we allocate beyond EOF so the subsequent truncate
+@@ -4852,7 +4876,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ if ((offset + len) > i_size_read(inode))
+ ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
+ }
+-
+ ext4_mark_inode_dirty(handle, inode);
+
+ /* Zero out partial block at the edges of the range */
+@@ -4879,13 +4902,11 @@ out_mutex:
+ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ {
+ struct inode *inode = file_inode(file);
+- handle_t *handle;
+ loff_t new_size = 0;
+ unsigned int max_blocks;
+ int ret = 0;
+ int flags;
+ ext4_lblk_t lblk;
+- struct timespec tv;
+ unsigned int blkbits = inode->i_blkbits;
+
+ /* Return error if mode is not supported */
+@@ -4936,36 +4957,15 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ goto out;
+ }
+
+- ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags, mode);
++ ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
++ flags, mode);
+ if (ret)
+ goto out;
+
+- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+- if (IS_ERR(handle))
+- goto out;
+-
+- tv = inode->i_ctime = ext4_current_time(inode);
+-
+- if (new_size) {
+- if (new_size > i_size_read(inode)) {
+- i_size_write(inode, new_size);
+- inode->i_mtime = tv;
+- }
+- if (new_size > EXT4_I(inode)->i_disksize)
+- ext4_update_i_disksize(inode, new_size);
+- } else {
+- /*
+- * Mark that we allocate beyond EOF so the subsequent truncate
+- * can proceed even if the new size is the same as i_size.
+- */
+- if ((offset + len) > i_size_read(inode))
+- ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
++ if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
++ ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
++ EXT4_I(inode)->i_sync_tid);
+ }
+- ext4_mark_inode_dirty(handle, inode);
+- if (file->f_flags & O_SYNC)
+- ext4_handle_sync(handle);
+-
+- ext4_journal_stop(handle);
+ out:
+ mutex_unlock(&inode->i_mutex);
+ trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index fd69da194826..e75f840000a0 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -1295,97 +1295,220 @@ do_indirects:
+ }
+ }
+
+-static int free_hole_blocks(handle_t *handle, struct inode *inode,
+- struct buffer_head *parent_bh, __le32 *i_data,
+- int level, ext4_lblk_t first,
+- ext4_lblk_t count, int max)
++/**
++ * ext4_ind_remove_space - remove space from the range
++ * @handle: JBD handle for this transaction
++ * @inode: inode we are dealing with
++ * @start: First block to remove
++ * @end: One block after the last block to remove (exclusive)
++ *
++ * Free the blocks in the defined range (end is exclusive endpoint of
++ * range). This is used by ext4_punch_hole().
++ */
++int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
++ ext4_lblk_t start, ext4_lblk_t end)
+ {
+- struct buffer_head *bh = NULL;
++ struct ext4_inode_info *ei = EXT4_I(inode);
++ __le32 *i_data = ei->i_data;
+ int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+- int ret = 0;
+- int i, inc;
+- ext4_lblk_t offset;
+- __le32 blk;
+-
+- inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
+- for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
+- if (offset >= count + first)
+- break;
+- if (*i_data == 0 || (offset + inc) <= first)
+- continue;
+- blk = *i_data;
+- if (level > 0) {
+- ext4_lblk_t first2;
+- ext4_lblk_t count2;
++ ext4_lblk_t offsets[4], offsets2[4];
++ Indirect chain[4], chain2[4];
++ Indirect *partial, *partial2;
++ ext4_lblk_t max_block;
++ __le32 nr = 0, nr2 = 0;
++ int n = 0, n2 = 0;
++ unsigned blocksize = inode->i_sb->s_blocksize;
+
+- bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
+- if (!bh) {
+- EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
+- "Read failure");
+- return -EIO;
+- }
+- if (first > offset) {
+- first2 = first - offset;
+- count2 = count;
++ max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
++ >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
++ if (end >= max_block)
++ end = max_block;
++ if ((start >= end) || (start > max_block))
++ return 0;
++
++ n = ext4_block_to_path(inode, start, offsets, NULL);
++ n2 = ext4_block_to_path(inode, end, offsets2, NULL);
++
++ BUG_ON(n > n2);
++
++ if ((n == 1) && (n == n2)) {
++ /* We're punching only within direct block range */
++ ext4_free_data(handle, inode, NULL, i_data + offsets[0],
++ i_data + offsets2[0]);
++ return 0;
++ } else if (n2 > n) {
++ /*
++ * Start and end are on a different levels so we're going to
++ * free partial block at start, and partial block at end of
++ * the range. If there are some levels in between then
++ * do_indirects label will take care of that.
++ */
++
++ if (n == 1) {
++ /*
++ * Start is at the direct block level, free
++ * everything to the end of the level.
++ */
++ ext4_free_data(handle, inode, NULL, i_data + offsets[0],
++ i_data + EXT4_NDIR_BLOCKS);
++ goto end_range;
++ }
++
++
++ partial = ext4_find_shared(inode, n, offsets, chain, &nr);
++ if (nr) {
++ if (partial == chain) {
++ /* Shared branch grows from the inode */
++ ext4_free_branches(handle, inode, NULL,
++ &nr, &nr+1, (chain+n-1) - partial);
++ *partial->p = 0;
+ } else {
+- first2 = 0;
+- count2 = count - (offset - first);
++ /* Shared branch grows from an indirect block */
++ BUFFER_TRACE(partial->bh, "get_write_access");
++ ext4_free_branches(handle, inode, partial->bh,
++ partial->p,
++ partial->p+1, (chain+n-1) - partial);
+ }
+- ret = free_hole_blocks(handle, inode, bh,
+- (__le32 *)bh->b_data, level - 1,
+- first2, count2,
+- inode->i_sb->s_blocksize >> 2);
+- if (ret) {
+- brelse(bh);
+- goto err;
++ }
++
++ /*
++ * Clear the ends of indirect blocks on the shared branch
++ * at the start of the range
++ */
++ while (partial > chain) {
++ ext4_free_branches(handle, inode, partial->bh,
++ partial->p + 1,
++ (__le32 *)partial->bh->b_data+addr_per_block,
++ (chain+n-1) - partial);
++ BUFFER_TRACE(partial->bh, "call brelse");
++ brelse(partial->bh);
++ partial--;
++ }
++
++end_range:
++ partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
++ if (nr2) {
++ if (partial2 == chain2) {
++ /*
++ * Remember, end is exclusive so here we're at
++ * the start of the next level we're not going
++ * to free. Everything was covered by the start
++ * of the range.
++ */
++ return 0;
++ } else {
++ /* Shared branch grows from an indirect block */
++ partial2--;
+ }
++ } else {
++ /*
++ * ext4_find_shared returns Indirect structure which
++ * points to the last element which should not be
++ * removed by truncate. But this is end of the range
++ * in punch_hole so we need to point to the next element
++ */
++ partial2->p++;
+ }
+- if (level == 0 ||
+- (bh && all_zeroes((__le32 *)bh->b_data,
+- (__le32 *)bh->b_data + addr_per_block))) {
+- ext4_free_data(handle, inode, parent_bh,
+- i_data, i_data + 1);
++
++ /*
++ * Clear the ends of indirect blocks on the shared branch
++ * at the end of the range
++ */
++ while (partial2 > chain2) {
++ ext4_free_branches(handle, inode, partial2->bh,
++ (__le32 *)partial2->bh->b_data,
++ partial2->p,
++ (chain2+n2-1) - partial2);
++ BUFFER_TRACE(partial2->bh, "call brelse");
++ brelse(partial2->bh);
++ partial2--;
+ }
+- brelse(bh);
+- bh = NULL;
++ goto do_indirects;
+ }
+
+-err:
+- return ret;
+-}
+-
+-int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
+- ext4_lblk_t first, ext4_lblk_t stop)
+-{
+- int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+- int level, ret = 0;
+- int num = EXT4_NDIR_BLOCKS;
+- ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
+- __le32 *i_data = EXT4_I(inode)->i_data;
+-
+- count = stop - first;
+- for (level = 0; level < 4; level++, max *= addr_per_block) {
+- if (first < max) {
+- ret = free_hole_blocks(handle, inode, NULL, i_data,
+- level, first, count, num);
+- if (ret)
+- goto err;
+- if (count > max - first)
+- count -= max - first;
+- else
+- break;
+- first = 0;
+- } else {
+- first -= max;
++ /* Punch happened within the same level (n == n2) */
++ partial = ext4_find_shared(inode, n, offsets, chain, &nr);
++ partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
++ /*
++ * ext4_find_shared returns Indirect structure which
++ * points to the last element which should not be
++ * removed by truncate. But this is end of the range
++ * in punch_hole so we need to point to the next element
++ */
++ partial2->p++;
++ while ((partial > chain) || (partial2 > chain2)) {
++ /* We're at the same block, so we're almost finished */
++ if ((partial->bh && partial2->bh) &&
++ (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
++ if ((partial > chain) && (partial2 > chain2)) {
++ ext4_free_branches(handle, inode, partial->bh,
++ partial->p + 1,
++ partial2->p,
++ (chain+n-1) - partial);
++ BUFFER_TRACE(partial->bh, "call brelse");
++ brelse(partial->bh);
++ BUFFER_TRACE(partial2->bh, "call brelse");
++ brelse(partial2->bh);
++ }
++ return 0;
+ }
+- i_data += num;
+- if (level == 0) {
+- num = 1;
+- max = 1;
++ /*
++ * Clear the ends of indirect blocks on the shared branch
++ * at the start of the range
++ */
++ if (partial > chain) {
++ ext4_free_branches(handle, inode, partial->bh,
++ partial->p + 1,
++ (__le32 *)partial->bh->b_data+addr_per_block,
++ (chain+n-1) - partial);
++ BUFFER_TRACE(partial->bh, "call brelse");
++ brelse(partial->bh);
++ partial--;
++ }
++ /*
++ * Clear the ends of indirect blocks on the shared branch
++ * at the end of the range
++ */
++ if (partial2 > chain2) {
++ ext4_free_branches(handle, inode, partial2->bh,
++ (__le32 *)partial2->bh->b_data,
++ partial2->p,
++ (chain2+n-1) - partial2);
++ BUFFER_TRACE(partial2->bh, "call brelse");
++ brelse(partial2->bh);
++ partial2--;
+ }
+ }
+
+-err:
+- return ret;
++do_indirects:
++ /* Kill the remaining (whole) subtrees */
++ switch (offsets[0]) {
++ default:
++ if (++n >= n2)
++ return 0;
++ nr = i_data[EXT4_IND_BLOCK];
++ if (nr) {
++ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
++ i_data[EXT4_IND_BLOCK] = 0;
++ }
++ case EXT4_IND_BLOCK:
++ if (++n >= n2)
++ return 0;
++ nr = i_data[EXT4_DIND_BLOCK];
++ if (nr) {
++ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
++ i_data[EXT4_DIND_BLOCK] = 0;
++ }
++ case EXT4_DIND_BLOCK:
++ if (++n >= n2)
++ return 0;
++ nr = i_data[EXT4_TIND_BLOCK];
++ if (nr) {
++ ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
++ i_data[EXT4_TIND_BLOCK] = 0;
++ }
++ case EXT4_TIND_BLOCK:
++ ;
++ }
++ return 0;
+ }
+-
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 8a064734e6eb..e9c9b5bd906a 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1092,27 +1092,11 @@ static int ext4_write_end(struct file *file,
+ } else
+ copied = block_write_end(file, mapping, pos,
+ len, copied, page, fsdata);
+-
+ /*
+- * No need to use i_size_read() here, the i_size
+- * cannot change under us because we hole i_mutex.
+- *
+- * But it's important to update i_size while still holding page lock:
++ * it's important to update i_size while still holding page lock:
+ * page writeout could otherwise come in and zero beyond i_size.
+ */
+- if (pos + copied > inode->i_size) {
+- i_size_write(inode, pos + copied);
+- i_size_changed = 1;
+- }
+-
+- if (pos + copied > EXT4_I(inode)->i_disksize) {
+- /* We need to mark inode dirty even if
+- * new_i_size is less that inode->i_size
+- * but greater than i_disksize. (hint delalloc)
+- */
+- ext4_update_i_disksize(inode, (pos + copied));
+- i_size_changed = 1;
+- }
++ i_size_changed = ext4_update_inode_size(inode, pos + copied);
+ unlock_page(page);
+ page_cache_release(page);
+
+@@ -1160,7 +1144,7 @@ static int ext4_journalled_write_end(struct file *file,
+ int ret = 0, ret2;
+ int partial = 0;
+ unsigned from, to;
+- loff_t new_i_size;
++ int size_changed = 0;
+
+ trace_ext4_journalled_write_end(inode, pos, len, copied);
+ from = pos & (PAGE_CACHE_SIZE - 1);
+@@ -1183,20 +1167,18 @@ static int ext4_journalled_write_end(struct file *file,
+ if (!partial)
+ SetPageUptodate(page);
+ }
+- new_i_size = pos + copied;
+- if (new_i_size > inode->i_size)
+- i_size_write(inode, pos+copied);
++ size_changed = ext4_update_inode_size(inode, pos + copied);
+ ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+ EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
+- if (new_i_size > EXT4_I(inode)->i_disksize) {
+- ext4_update_i_disksize(inode, new_i_size);
++ unlock_page(page);
++ page_cache_release(page);
++
++ if (size_changed) {
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (!ret)
+ ret = ret2;
+ }
+
+- unlock_page(page);
+- page_cache_release(page);
+ if (pos + len > inode->i_size && ext4_can_truncate(inode))
+ /* if we have allocated more blocks and copied
+ * less. We will have blocks allocated outside
+@@ -2212,6 +2194,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
+ struct ext4_map_blocks *map = &mpd->map;
+ int err;
+ loff_t disksize;
++ int progress = 0;
+
+ mpd->io_submit.io_end->offset =
+ ((loff_t)map->m_lblk) << inode->i_blkbits;
+@@ -2228,8 +2211,11 @@ static int mpage_map_and_submit_extent(handle_t *handle,
+ * is non-zero, a commit should free up blocks.
+ */
+ if ((err == -ENOMEM) ||
+- (err == -ENOSPC && ext4_count_free_clusters(sb)))
++ (err == -ENOSPC && ext4_count_free_clusters(sb))) {
++ if (progress)
++ goto update_disksize;
+ return err;
++ }
+ ext4_msg(sb, KERN_CRIT,
+ "Delayed block allocation failed for "
+ "inode %lu at logical offset %llu with"
+@@ -2246,15 +2232,17 @@ static int mpage_map_and_submit_extent(handle_t *handle,
+ *give_up_on_write = true;
+ return err;
+ }
++ progress = 1;
+ /*
+ * Update buffer state, submit mapped pages, and get us new
+ * extent to map
+ */
+ err = mpage_map_and_submit_buffers(mpd);
+ if (err < 0)
+- return err;
++ goto update_disksize;
+ } while (map->m_len);
+
++update_disksize:
+ /*
+ * Update on-disk size after IO is submitted. Races with
+ * truncate are avoided by checking i_size under i_data_sem.
+@@ -3624,7 +3612,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+ ret = ext4_ext_remove_space(inode, first_block,
+ stop_block - 1);
+ else
+- ret = ext4_free_hole_blocks(handle, inode, first_block,
++ ret = ext4_ind_remove_space(handle, inode, first_block,
+ stop_block);
+
+ up_write(&EXT4_I(inode)->i_data_sem);
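The progress flag introduced in the mpage_map_and_submit_extent() hunk above changes the unwind path: once at least one batch of blocks has been mapped and submitted, a transient -ENOMEM/-ENOSPC no longer returns straight away but jumps to update_disksize, so the on-disk size still covers what was written. The sketch below models only that control-flow shape; map_one_chunk() and the chunk numbers are hypothetical, not ext4 code.

/* progress_sketch.c - control-flow shape of the hunk above (toy code) */
#include <stdio.h>
#include <errno.h>

static int map_one_chunk(int i)
{
	/* pretend the third chunk hits a transient allocation failure */
	return i == 2 ? -ENOMEM : 0;
}

static int process(int nchunks)
{
	int err = 0, progress = 0;

	for (int i = 0; i < nchunks; i++) {
		err = map_one_chunk(i);
		if (err) {
			if (progress)
				goto bookkeep;  /* keep what already went out */
			return err;             /* nothing submitted yet      */
		}
		progress = 1;
		printf("chunk %d submitted\n", i);
	}
bookkeep:
	printf("updating on-disk size for the submitted chunks\n");
	return err;
}

int main(void)
{
	return process(5) ? 1 : 0;
}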
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 2dcb936be90e..c3e7418a6811 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1412,6 +1412,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ int last = first + count - 1;
+ struct super_block *sb = e4b->bd_sb;
+
++ if (WARN_ON(count == 0))
++ return;
+ BUG_ON(last >= (sb->s_blocksize << 3));
+ assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
+ /* Don't bother if the block group is corrupt. */
+@@ -3216,8 +3218,30 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
+ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
+ {
+ struct ext4_prealloc_space *pa = ac->ac_pa;
++ struct ext4_buddy e4b;
++ int err;
+
+- if (pa && pa->pa_type == MB_INODE_PA)
++ if (pa == NULL) {
++ if (ac->ac_f_ex.fe_len == 0)
++ return;
++ err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
++ if (err) {
++ /*
++ * This should never happen since we pin the
++ * pages in the ext4_allocation_context so
++ * ext4_mb_load_buddy() should never fail.
++ */
++ WARN(1, "mb_load_buddy failed (%d)", err);
++ return;
++ }
++ ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
++ mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
++ ac->ac_f_ex.fe_len);
++ ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
++ ext4_mb_unload_buddy(&e4b);
++ return;
++ }
++ if (pa->pa_type == MB_INODE_PA)
+ pa->pa_free += ac->ac_b_ex.fe_len;
+ }
+
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 3520ab8a6639..9e6eced1605b 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3128,7 +3128,8 @@ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
+ return retval;
+ }
+
+-static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent)
++static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent,
++ int force_reread)
+ {
+ int retval;
+ /*
+@@ -3140,7 +3141,8 @@ static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent)
+ if (le32_to_cpu(ent->de->inode) != ent->inode->i_ino ||
+ ent->de->name_len != ent->dentry->d_name.len ||
+ strncmp(ent->de->name, ent->dentry->d_name.name,
+- ent->de->name_len)) {
++ ent->de->name_len) ||
++ force_reread) {
+ retval = ext4_find_delete_entry(handle, ent->dir,
+ &ent->dentry->d_name);
+ } else {
+@@ -3191,6 +3193,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ .dentry = new_dentry,
+ .inode = new_dentry->d_inode,
+ };
++ int force_reread;
+ int retval;
+
+ dquot_initialize(old.dir);
+@@ -3246,6 +3249,15 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ if (retval)
+ goto end_rename;
+ }
++ /*
++ * If we're renaming a file within an inline_data dir and adding or
++ * setting the new dirent causes a conversion from inline_data to
++ * extents/blockmap, we need to force the dirent delete code to
++ * re-read the directory, or else we end up trying to delete a dirent
++ * from what is now the extent tree root (or a block map).
++ */
++ force_reread = (new.dir->i_ino == old.dir->i_ino &&
++ ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
+ if (!new.bh) {
+ retval = ext4_add_entry(handle, new.dentry, old.inode);
+ if (retval)
+@@ -3256,6 +3268,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ if (retval)
+ goto end_rename;
+ }
++ if (force_reread)
++ force_reread = !ext4_test_inode_flag(new.dir,
++ EXT4_INODE_INLINE_DATA);
+
+ /*
+ * Like most other Unix systems, set the ctime for inodes on a
+@@ -3267,7 +3282,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ /*
+ * ok, that's it
+ */
+- ext4_rename_delete(handle, &old);
++ ext4_rename_delete(handle, &old, force_reread);
+
+ if (new.inode) {
+ ext4_dec_count(handle, new.inode);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 6df7bc611dbd..beeb5c4e1f9d 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3185,9 +3185,9 @@ static int set_journal_csum_feature_set(struct super_block *sb)
+
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+- /* journal checksum v2 */
++ /* journal checksum v3 */
+ compat = 0;
+- incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
++ incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
+ } else {
+ /* journal checksum v1 */
+ compat = JBD2_FEATURE_COMPAT_CHECKSUM;
+@@ -3209,6 +3209,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
+ jbd2_journal_clear_features(sbi->s_journal,
+ JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
++ JBD2_FEATURE_INCOMPAT_CSUM_V3 |
+ JBD2_FEATURE_INCOMPAT_CSUM_V2);
+ }
+
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index 4556ce1af5b0..5ddaf8625d3b 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -61,7 +61,7 @@ static void isofs_put_super(struct super_block *sb)
+ return;
+ }
+
+-static int isofs_read_inode(struct inode *);
++static int isofs_read_inode(struct inode *, int relocated);
+ static int isofs_statfs (struct dentry *, struct kstatfs *);
+
+ static struct kmem_cache *isofs_inode_cachep;
+@@ -1259,7 +1259,7 @@ out_toomany:
+ goto out;
+ }
+
+-static int isofs_read_inode(struct inode *inode)
++static int isofs_read_inode(struct inode *inode, int relocated)
+ {
+ struct super_block *sb = inode->i_sb;
+ struct isofs_sb_info *sbi = ISOFS_SB(sb);
+@@ -1404,7 +1404,7 @@ static int isofs_read_inode(struct inode *inode)
+ */
+
+ if (!high_sierra) {
+- parse_rock_ridge_inode(de, inode);
++ parse_rock_ridge_inode(de, inode, relocated);
+ /* if we want uid/gid set, override the rock ridge setting */
+ if (sbi->s_uid_set)
+ inode->i_uid = sbi->s_uid;
+@@ -1483,9 +1483,10 @@ static int isofs_iget5_set(struct inode *ino, void *data)
+ * offset that point to the underlying meta-data for the inode. The
+ * code below is otherwise similar to the iget() code in
+ * include/linux/fs.h */
+-struct inode *isofs_iget(struct super_block *sb,
+- unsigned long block,
+- unsigned long offset)
++struct inode *__isofs_iget(struct super_block *sb,
++ unsigned long block,
++ unsigned long offset,
++ int relocated)
+ {
+ unsigned long hashval;
+ struct inode *inode;
+@@ -1507,7 +1508,7 @@ struct inode *isofs_iget(struct super_block *sb,
+ return ERR_PTR(-ENOMEM);
+
+ if (inode->i_state & I_NEW) {
+- ret = isofs_read_inode(inode);
++ ret = isofs_read_inode(inode, relocated);
+ if (ret < 0) {
+ iget_failed(inode);
+ inode = ERR_PTR(ret);
+diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
+index 99167238518d..0ac4c1f73fbd 100644
+--- a/fs/isofs/isofs.h
++++ b/fs/isofs/isofs.h
+@@ -107,7 +107,7 @@ extern int iso_date(char *, int);
+
+ struct inode; /* To make gcc happy */
+
+-extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
++extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated);
+ extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
+ extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *);
+
+@@ -118,9 +118,24 @@ extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int
+ extern struct buffer_head *isofs_bread(struct inode *, sector_t);
+ extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
+
+-extern struct inode *isofs_iget(struct super_block *sb,
+- unsigned long block,
+- unsigned long offset);
++struct inode *__isofs_iget(struct super_block *sb,
++ unsigned long block,
++ unsigned long offset,
++ int relocated);
++
++static inline struct inode *isofs_iget(struct super_block *sb,
++ unsigned long block,
++ unsigned long offset)
++{
++ return __isofs_iget(sb, block, offset, 0);
++}
++
++static inline struct inode *isofs_iget_reloc(struct super_block *sb,
++ unsigned long block,
++ unsigned long offset)
++{
++ return __isofs_iget(sb, block, offset, 1);
++}
+
+ /* Because the inode number is no longer relevant to finding the
+ * underlying meta-data for an inode, we are free to choose a more
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index c0bf42472e40..f488bbae541a 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -288,12 +288,16 @@ eio:
+ goto out;
+ }
+
++#define RR_REGARD_XA 1
++#define RR_RELOC_DE 2
++
+ static int
+ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
+- struct inode *inode, int regard_xa)
++ struct inode *inode, int flags)
+ {
+ int symlink_len = 0;
+ int cnt, sig;
++ unsigned int reloc_block;
+ struct inode *reloc;
+ struct rock_ridge *rr;
+ int rootflag;
+@@ -305,7 +309,7 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de,
+
+ init_rock_state(&rs, inode);
+ setup_rock_ridge(de, inode, &rs);
+- if (regard_xa) {
++ if (flags & RR_REGARD_XA) {
+ rs.chr += 14;
+ rs.len -= 14;
+ if (rs.len < 0)
+@@ -485,12 +489,22 @@ repeat:
+ "relocated directory\n");
+ goto out;
+ case SIG('C', 'L'):
+- ISOFS_I(inode)->i_first_extent =
+- isonum_733(rr->u.CL.location);
+- reloc =
+- isofs_iget(inode->i_sb,
+- ISOFS_I(inode)->i_first_extent,
+- 0);
++ if (flags & RR_RELOC_DE) {
++ printk(KERN_ERR
++ "ISOFS: Recursive directory relocation "
++ "is not supported\n");
++ goto eio;
++ }
++ reloc_block = isonum_733(rr->u.CL.location);
++ if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
++ ISOFS_I(inode)->i_iget5_offset == 0) {
++ printk(KERN_ERR
++ "ISOFS: Directory relocation points to "
++ "itself\n");
++ goto eio;
++ }
++ ISOFS_I(inode)->i_first_extent = reloc_block;
++ reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
+ if (IS_ERR(reloc)) {
+ ret = PTR_ERR(reloc);
+ goto out;
+@@ -637,9 +651,11 @@ static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
+ return rpnt;
+ }
+
+-int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
++int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
++ int relocated)
+ {
+- int result = parse_rock_ridge_inode_internal(de, inode, 0);
++ int flags = relocated ? RR_RELOC_DE : 0;
++ int result = parse_rock_ridge_inode_internal(de, inode, flags);
+
+ /*
+ * if rockridge flag was reset and we didn't look for attributes
+@@ -647,7 +663,8 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
+ */
+ if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
+ && (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
+- result = parse_rock_ridge_inode_internal(de, inode, 14);
++ result = parse_rock_ridge_inode_internal(de, inode,
++ flags | RR_REGARD_XA);
+ }
+ return result;
+ }
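The rock.c changes above bound Rock Ridge CL (child link) relocation to a single hop: an inode that was itself reached through a relocation refuses to follow another one (RR_RELOC_DE), and a relocation that points back at its own block is rejected outright. A toy userland model of that guard, with made-up directory records, is sketched below; it is an illustration only, not the isofs code.

/* reloc_guard_sketch.c - toy model of the single-hop relocation rule above */
#include <stdio.h>

struct toy_dir {
	unsigned block;        /* where this directory lives        */
	unsigned reloc_block;  /* CL target, 0 if not relocated     */
};

static int read_dir(const struct toy_dir *dirs, int ndirs,
		    unsigned block, int relocated)
{
	for (int i = 0; i < ndirs; i++) {
		const struct toy_dir *d = &dirs[i];
		if (d->block != block)
			continue;
		if (!d->reloc_block)
			return 0;                       /* plain directory  */
		if (relocated) {
			fprintf(stderr, "recursive relocation rejected\n");
			return -1;
		}
		if (d->reloc_block == d->block) {
			fprintf(stderr, "self-relocation rejected\n");
			return -1;
		}
		/* follow the CL entry exactly once */
		return read_dir(dirs, ndirs, d->reloc_block, 1);
	}
	return -1;                                      /* not found        */
}

int main(void)
{
	struct toy_dir dirs[] = {
		{ .block = 10, .reloc_block = 20 },     /* relocated once   */
		{ .block = 20, .reloc_block = 30 },     /* would recurse    */
		{ .block = 30, .reloc_block = 0  },
		{ .block = 40, .reloc_block = 40 },     /* points to itself */
	};

	read_dir(dirs, 4, 10, 0);   /* rejected at block 20 */
	read_dir(dirs, 4, 40, 0);   /* rejected immediately */
	return 0;
}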
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 6fac74349856..b73e0215baa7 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -97,7 +97,7 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
+ struct commit_header *h;
+ __u32 csum;
+
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return;
+
+ h = (struct commit_header *)(bh->b_data);
+@@ -313,11 +313,11 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
+ return checksum;
+ }
+
+-static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
++static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
+ unsigned long long block)
+ {
+ tag->t_blocknr = cpu_to_be32(block & (u32)~0);
+- if (tag_bytes > JBD2_TAG_SIZE32)
++ if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_64BIT))
+ tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
+ }
+
+@@ -327,7 +327,7 @@ static void jbd2_descr_block_csum_set(journal_t *j,
+ struct jbd2_journal_block_tail *tail;
+ __u32 csum;
+
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return;
+
+ tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
+@@ -340,12 +340,13 @@ static void jbd2_descr_block_csum_set(journal_t *j,
+ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
+ struct buffer_head *bh, __u32 sequence)
+ {
++ journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
+ struct page *page = bh->b_page;
+ __u8 *addr;
+ __u32 csum32;
+ __be32 seq;
+
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return;
+
+ seq = cpu_to_be32(sequence);
+@@ -355,8 +356,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
+ bh->b_size);
+ kunmap_atomic(addr);
+
+- /* We only have space to store the lower 16 bits of the crc32c. */
+- tag->t_checksum = cpu_to_be16(csum32);
++ if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++ tag3->t_checksum = cpu_to_be32(csum32);
++ else
++ tag->t_checksum = cpu_to_be16(csum32);
+ }
+ /*
+ * jbd2_journal_commit_transaction
+@@ -396,7 +399,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ LIST_HEAD(io_bufs);
+ LIST_HEAD(log_bufs);
+
+- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (jbd2_journal_has_csum_v2or3(journal))
+ csum_size = sizeof(struct jbd2_journal_block_tail);
+
+ /*
+@@ -690,7 +693,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ tag_flag |= JBD2_FLAG_SAME_UUID;
+
+ tag = (journal_block_tag_t *) tagp;
+- write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
++ write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
+ tag->t_flags = cpu_to_be16(tag_flag);
+ jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
+ commit_transaction->t_tid);
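The jbd2_block_tag_csum_set() hunk above is where the v2/v3 difference in checksum width shows up: a v2 tag keeps only the low 16 bits of the crc32c while a v3 tag keeps the full 32 bits (the matching verify side appears in the recovery.c hunk further down). The toy program below illustrates what that truncation means for a deliberately constructed mismatch; it is an illustration only, not jbd2 code.

/* csum_width_sketch.c - 16-bit vs 32-bit stored checksum (toy model) */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t csum32 = 0xdeadbeef;    /* stand-in for crc32c(seed, block) */

	uint16_t v2_stored = (uint16_t)csum32;  /* truncated, as in v2 tags  */
	uint32_t v3_stored = csum32;            /* full width, as in v3 tags */

	/* A contrived mismatch that differs only in the upper half of the
	 * crc slips past a v2-style compare but is caught by the v3 one. */
	uint32_t recomputed = csum32 ^ 0x00010000;

	printf("v2 check: %s\n",
	       v2_stored == (uint16_t)recomputed ? "PASSES (misses it)" : "fails");
	printf("v3 check: %s\n",
	       v3_stored == recomputed ? "passes" : "FAILS (catches it)");
	return 0;
}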
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 67b8e303946c..19d74d86d99c 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -124,7 +124,7 @@ EXPORT_SYMBOL(__jbd2_debug);
+ /* Checksumming functions */
+ static int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
+ {
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return 1;
+
+ return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
+@@ -145,7 +145,7 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
+
+ static int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
+ {
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return 1;
+
+ return sb->s_checksum == jbd2_superblock_csum(j, sb);
+@@ -153,7 +153,7 @@ static int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
+
+ static void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
+ {
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return;
+
+ sb->s_checksum = jbd2_superblock_csum(j, sb);
+@@ -1522,21 +1522,29 @@ static int journal_get_superblock(journal_t *journal)
+ goto out;
+ }
+
+- if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
+- JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
++ if (jbd2_journal_has_csum_v2or3(journal) &&
++ JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) {
+ /* Can't have checksum v1 and v2 on at the same time! */
+ printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 "
+ "at the same time!\n");
+ goto out;
+ }
+
++ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) &&
++ JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
++ /* Can't have checksum v2 and v3 at the same time! */
++ printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 "
++ "at the same time!\n");
++ goto out;
++ }
++
+ if (!jbd2_verify_csum_type(journal, sb)) {
+ printk(KERN_ERR "JBD2: Unknown checksum type\n");
+ goto out;
+ }
+
+ /* Load the checksum driver */
+- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
++ if (jbd2_journal_has_csum_v2or3(journal)) {
+ journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+ if (IS_ERR(journal->j_chksum_driver)) {
+ printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
+@@ -1553,7 +1561,7 @@ static int journal_get_superblock(journal_t *journal)
+ }
+
+ /* Precompute checksum seed for all metadata */
+- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (jbd2_journal_has_csum_v2or3(journal))
+ journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
+ sizeof(sb->s_uuid));
+
+@@ -1813,8 +1821,14 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
+ return 0;
+
+- /* Asking for checksumming v2 and v1? Only give them v2. */
+- if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
++ /* If enabling v2 checksums, turn on v3 instead */
++ if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) {
++ incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2;
++ incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3;
++ }
++
++ /* Asking for checksumming v3 and v1? Only give them v3. */
++ if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 &&
+ compat & JBD2_FEATURE_COMPAT_CHECKSUM)
+ compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
+
+@@ -1823,8 +1837,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+
+ sb = journal->j_superblock;
+
+- /* If enabling v2 checksums, update superblock */
+- if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
++ /* If enabling v3 checksums, update superblock */
++ if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
+ sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
+ sb->s_feature_compat &=
+ ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
+@@ -1842,8 +1856,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ }
+
+ /* Precompute checksum seed for all metadata */
+- if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+- JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (jbd2_journal_has_csum_v2or3(journal))
+ journal->j_csum_seed = jbd2_chksum(journal, ~0,
+ sb->s_uuid,
+ sizeof(sb->s_uuid));
+@@ -1852,7 +1865,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ /* If enabling v1 checksums, downgrade superblock */
+ if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
+ sb->s_feature_incompat &=
+- ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
++ ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 |
++ JBD2_FEATURE_INCOMPAT_CSUM_V3);
+
+ sb->s_feature_compat |= cpu_to_be32(compat);
+ sb->s_feature_ro_compat |= cpu_to_be32(ro);
+@@ -2165,16 +2179,20 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
+ */
+ size_t journal_tag_bytes(journal_t *journal)
+ {
+- journal_block_tag_t tag;
+- size_t x = 0;
++ size_t sz;
++
++ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++ return sizeof(journal_block_tag3_t);
++
++ sz = sizeof(journal_block_tag_t);
+
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+- x += sizeof(tag.t_checksum);
++ sz += sizeof(__u16);
+
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+- return x + JBD2_TAG_SIZE64;
++ return sz;
+ else
+- return x + JBD2_TAG_SIZE32;
++ return sz - sizeof(__u32);
+ }
+
+ /*
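journal_tag_bytes() above now has three cases: a CSUM_V3 journal always uses the full journal_block_tag3_t, otherwise the size starts from the v1/v2 tag struct, grows by two bytes for CSUM_V2 and shrinks by four when the journal is not 64-bit. The userland sketch below reproduces that arithmetic with stand-in constants; TAG3_SIZE follows the four __be32 fields visible in the jbd2.h hunk further down, while TAG_SIZE is an assumed sizeof(journal_block_tag_t).

/* tag_size_sketch.c - userland model of the journal_tag_bytes() hunk above */
#include <stdio.h>
#include <stddef.h>

#define TAG3_SIZE 16   /* 4 x __be32: blocknr, flags, blocknr_high, checksum */
#define TAG_SIZE  12   /* assumed size of the v1/v2 on-disk tag struct       */

struct feats { int csum_v2, csum_v3, bit64; };

static size_t tag_bytes(struct feats f)
{
	size_t sz;

	if (f.csum_v3)
		return TAG3_SIZE;              /* full struct, 32-bit csum    */

	sz = TAG_SIZE;
	if (f.csum_v2)
		sz += sizeof(unsigned short);  /* CSUM_V2 tags are 2B larger  */

	if (f.bit64)
		return sz;
	return sz - sizeof(unsigned int);      /* drop t_blocknr_high on disk */
}

int main(void)
{
	struct feats combos[] = {
		{ 0, 0, 0 }, { 0, 0, 1 }, { 1, 0, 0 }, { 1, 0, 1 }, { 0, 1, 1 },
	};

	for (size_t i = 0; i < sizeof(combos) / sizeof(combos[0]); i++)
		printf("csum_v2=%d csum_v3=%d 64bit=%d -> %zu bytes/tag\n",
		       combos[i].csum_v2, combos[i].csum_v3, combos[i].bit64,
		       tag_bytes(combos[i]));
	return 0;
}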
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 3b6bb19d60b1..9b329b55ffe3 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -181,7 +181,7 @@ static int jbd2_descr_block_csum_verify(journal_t *j,
+ __be32 provided;
+ __u32 calculated;
+
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return 1;
+
+ tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
+@@ -205,7 +205,7 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
+ int nr = 0, size = journal->j_blocksize;
+ int tag_bytes = journal_tag_bytes(journal);
+
+- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (jbd2_journal_has_csum_v2or3(journal))
+ size -= sizeof(struct jbd2_journal_block_tail);
+
+ tagp = &bh->b_data[sizeof(journal_header_t)];
+@@ -338,10 +338,11 @@ int jbd2_journal_skip_recovery(journal_t *journal)
+ return err;
+ }
+
+-static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag)
++static inline unsigned long long read_tag_block(journal_t *journal,
++ journal_block_tag_t *tag)
+ {
+ unsigned long long block = be32_to_cpu(tag->t_blocknr);
+- if (tag_bytes > JBD2_TAG_SIZE32)
++ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+ block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
+ return block;
+ }
+@@ -384,7 +385,7 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+ __be32 provided;
+ __u32 calculated;
+
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return 1;
+
+ h = buf;
+@@ -399,17 +400,21 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+ static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
+ void *buf, __u32 sequence)
+ {
++ journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
+ __u32 csum32;
+ __be32 seq;
+
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return 1;
+
+ seq = cpu_to_be32(sequence);
+ csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
+ csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
+
+- return tag->t_checksum == cpu_to_be16(csum32);
++ if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++ return tag3->t_checksum == cpu_to_be32(csum32);
++ else
++ return tag->t_checksum == cpu_to_be16(csum32);
+ }
+
+ static int do_one_pass(journal_t *journal,
+@@ -426,6 +431,7 @@ static int do_one_pass(journal_t *journal,
+ int tag_bytes = journal_tag_bytes(journal);
+ __u32 crc32_sum = ~0; /* Transactional Checksums */
+ int descr_csum_size = 0;
++ int block_error = 0;
+
+ /*
+ * First thing is to establish what we expect to find in the log
+@@ -512,8 +518,7 @@ static int do_one_pass(journal_t *journal,
+ switch(blocktype) {
+ case JBD2_DESCRIPTOR_BLOCK:
+ /* Verify checksum first */
+- if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+- JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (jbd2_journal_has_csum_v2or3(journal))
+ descr_csum_size =
+ sizeof(struct jbd2_journal_block_tail);
+ if (descr_csum_size > 0 &&
+@@ -574,7 +579,7 @@ static int do_one_pass(journal_t *journal,
+ unsigned long long blocknr;
+
+ J_ASSERT(obh != NULL);
+- blocknr = read_tag_block(tag_bytes,
++ blocknr = read_tag_block(journal,
+ tag);
+
+ /* If the block has been
+@@ -598,7 +603,8 @@ static int do_one_pass(journal_t *journal,
+ "checksum recovering "
+ "block %llu in log\n",
+ blocknr);
+- continue;
++ block_error = 1;
++ goto skip_write;
+ }
+
+ /* Find a buffer for the new
+@@ -797,7 +803,8 @@ static int do_one_pass(journal_t *journal,
+ success = -EIO;
+ }
+ }
+-
++ if (block_error && success == 0)
++ success = -EIO;
+ return success;
+
+ failed:
+@@ -811,7 +818,7 @@ static int jbd2_revoke_block_csum_verify(journal_t *j,
+ __be32 provided;
+ __u32 calculated;
+
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return 1;
+
+ tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
+diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
+index 198c9c10276d..d5e95a175c92 100644
+--- a/fs/jbd2/revoke.c
++++ b/fs/jbd2/revoke.c
+@@ -91,8 +91,8 @@
+ #include <linux/list.h>
+ #include <linux/init.h>
+ #include <linux/bio.h>
+-#endif
+ #include <linux/log2.h>
++#endif
+
+ static struct kmem_cache *jbd2_revoke_record_cache;
+ static struct kmem_cache *jbd2_revoke_table_cache;
+@@ -597,7 +597,7 @@ static void write_one_revoke_record(journal_t *journal,
+ offset = *offsetp;
+
+ /* Do we need to leave space at the end for a checksum? */
+- if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (jbd2_journal_has_csum_v2or3(journal))
+ csum_size = sizeof(struct jbd2_journal_revoke_tail);
+
+ /* Make sure we have a descriptor with space left for the record */
+@@ -644,7 +644,7 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
+ struct jbd2_journal_revoke_tail *tail;
+ __u32 csum;
+
+- if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++ if (!jbd2_journal_has_csum_v2or3(j))
+ return;
+
+ tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
+diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
+index 8f854dde4150..24c6898159cc 100644
+--- a/fs/nfs/nfs3acl.c
++++ b/fs/nfs/nfs3acl.c
+@@ -129,7 +129,10 @@ static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+ .rpc_argp = &args,
+ .rpc_resp = &fattr,
+ };
+- int status;
++ int status = 0;
++
++ if (acl == NULL && (!S_ISDIR(inode->i_mode) || dfacl == NULL))
++ goto out;
+
+ status = -EOPNOTSUPP;
+ if (!nfs_server_capable(inode, NFS_CAP_ACLS))
+@@ -256,7 +259,7 @@ nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
+ char *p = data + *result;
+
+ acl = get_acl(inode, type);
+- if (!acl)
++ if (IS_ERR_OR_NULL(acl))
+ return 0;
+
+ posix_acl_release(acl);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 4bf3d97cc5a0..dac979866f83 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2545,6 +2545,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
+ struct nfs4_closedata *calldata = data;
+ struct nfs4_state *state = calldata->state;
+ struct nfs_server *server = NFS_SERVER(calldata->inode);
++ nfs4_stateid *res_stateid = NULL;
+
+ dprintk("%s: begin!\n", __func__);
+ if (!nfs4_sequence_done(task, &calldata->res.seq_res))
+@@ -2555,12 +2556,12 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
+ */
+ switch (task->tk_status) {
+ case 0:
+- if (calldata->roc)
++ res_stateid = &calldata->res.stateid;
++ if (calldata->arg.fmode == 0 && calldata->roc)
+ pnfs_roc_set_barrier(state->inode,
+ calldata->roc_barrier);
+- nfs_clear_open_stateid(state, &calldata->res.stateid, 0);
+ renew_lease(server, calldata->timestamp);
+- goto out_release;
++ break;
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+@@ -2574,7 +2575,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
+ goto out_release;
+ }
+ }
+- nfs_clear_open_stateid(state, NULL, calldata->arg.fmode);
++ nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
+ out_release:
+ nfs_release_seqid(calldata->arg.seqid);
+ nfs_refresh_inode(calldata->inode, calldata->res.fattr);
+@@ -2586,6 +2587,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ struct nfs4_closedata *calldata = data;
+ struct nfs4_state *state = calldata->state;
+ struct inode *inode = calldata->inode;
++ bool is_rdonly, is_wronly, is_rdwr;
+ int call_close = 0;
+
+ dprintk("%s: begin!\n", __func__);
+@@ -2593,18 +2595,24 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ goto out_wait;
+
+ task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
+- calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
+ spin_lock(&state->owner->so_lock);
++ is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
++ is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
++ is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
++ /* Calculate the current open share mode */
++ calldata->arg.fmode = 0;
++ if (is_rdonly || is_rdwr)
++ calldata->arg.fmode |= FMODE_READ;
++ if (is_wronly || is_rdwr)
++ calldata->arg.fmode |= FMODE_WRITE;
+ /* Calculate the change in open mode */
+ if (state->n_rdwr == 0) {
+ if (state->n_rdonly == 0) {
+- call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
+- call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
++ call_close |= is_rdonly || is_rdwr;
+ calldata->arg.fmode &= ~FMODE_READ;
+ }
+ if (state->n_wronly == 0) {
+- call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
+- call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
++ call_close |= is_wronly || is_rdwr;
+ calldata->arg.fmode &= ~FMODE_WRITE;
+ }
+ }
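In the nfs4_close_prepare() hunk above, the share mode sent to the server is no longer hard-coded to FMODE_READ|FMODE_WRITE: it is derived from which open-state bits are actually set, and the READ/WRITE parts are then dropped when the corresponding open counts reach zero, which feeds the later choice between a full CLOSE and an OPEN_DOWNGRADE. A toy model of that calculation follows; the struct and helper here are invented for illustration, not NFS client code.

/* close_fmode_sketch.c - model of the share-mode calculation above (toy) */
#include <stdio.h>

#define FMODE_READ  0x1
#define FMODE_WRITE 0x2

struct open_state {
	int rdonly, wronly, rdwr;          /* open-state flags            */
	int n_rdonly, n_wronly, n_rdwr;    /* outstanding opens per mode  */
};

static int close_fmode(const struct open_state *s, int *call_close)
{
	int fmode = 0;

	/* current share mode, as the server last saw it */
	if (s->rdonly || s->rdwr)
		fmode |= FMODE_READ;
	if (s->wronly || s->rdwr)
		fmode |= FMODE_WRITE;

	*call_close = 0;
	if (s->n_rdwr == 0) {
		if (s->n_rdonly == 0) {
			*call_close |= s->rdonly || s->rdwr;
			fmode &= ~FMODE_READ;
		}
		if (s->n_wronly == 0) {
			*call_close |= s->wronly || s->rdwr;
			fmode &= ~FMODE_WRITE;
		}
	}
	return fmode;
}

int main(void)
{
	/* File was open read-write; the writer is gone, a reader remains:
	 * the result keeps FMODE_READ, i.e. a downgrade rather than losing
	 * the read state entirely. */
	struct open_state s = { .rdwr = 1, .n_rdonly = 1 };
	int call_close;
	int fmode = close_fmode(&s, &call_close);

	printf("remaining fmode = %#x, call_close = %d\n", fmode, call_close);
	return 0;
}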
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 084af1060d79..3fd83327bbad 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -2180,7 +2180,7 @@ out_no_address:
+ return -EINVAL;
+ }
+
+-#define NFS_MOUNT_CMP_FLAGMASK ~(NFS_MOUNT_INTR \
++#define NFS_REMOUNT_CMP_FLAGMASK ~(NFS_MOUNT_INTR \
+ | NFS_MOUNT_SECURE \
+ | NFS_MOUNT_TCP \
+ | NFS_MOUNT_VER3 \
+@@ -2188,15 +2188,16 @@ out_no_address:
+ | NFS_MOUNT_NONLM \
+ | NFS_MOUNT_BROKEN_SUID \
+ | NFS_MOUNT_STRICTLOCK \
+- | NFS_MOUNT_UNSHARED \
+- | NFS_MOUNT_NORESVPORT \
+ | NFS_MOUNT_LEGACY_INTERFACE)
+
++#define NFS_MOUNT_CMP_FLAGMASK (NFS_REMOUNT_CMP_FLAGMASK & \
++ ~(NFS_MOUNT_UNSHARED | NFS_MOUNT_NORESVPORT))
++
+ static int
+ nfs_compare_remount_data(struct nfs_server *nfss,
+ struct nfs_parsed_mount_data *data)
+ {
+- if ((data->flags ^ nfss->flags) & NFS_MOUNT_CMP_FLAGMASK ||
++ if ((data->flags ^ nfss->flags) & NFS_REMOUNT_CMP_FLAGMASK ||
+ data->rsize != nfss->rsize ||
+ data->wsize != nfss->wsize ||
+ data->version != nfss->nfs_client->rpc_ops->version ||
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 2c73cae9899d..0f23ad005826 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -689,7 +689,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ clp->cl_cb_session = ses;
+ args.bc_xprt = conn->cb_xprt;
+ args.prognumber = clp->cl_cb_session->se_cb_prog;
+- args.protocol = XPRT_TRANSPORT_BC_TCP;
++ args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
++ XPRT_TRANSPORT_BC;
+ args.authflavor = ses->se_cb_sec.flavor;
+ }
+ /* Create RPC client */
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 1879e43f2868..2f2edbb2a4a3 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -221,7 +221,8 @@ static int nfsd_startup_generic(int nrservs)
+ */
+ ret = nfsd_racache_init(2*nrservs);
+ if (ret)
+- return ret;
++ goto dec_users;
++
+ ret = nfs4_state_start();
+ if (ret)
+ goto out_racache;
+@@ -229,6 +230,8 @@ static int nfsd_startup_generic(int nrservs)
+
+ out_racache:
+ nfsd_racache_shutdown();
++dec_users:
++ nfsd_users--;
+ return ret;
+ }
+
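The nfsd_startup_generic() fix above is an unwind-balance bug: on a racache init failure the old code returned immediately and skipped the user-count decrement that the new dec_users label now performs. A small, self-contained sketch of the same goto-unwind pattern (all names hypothetical) is shown below.

/* unwind_sketch.c - toy model of the goto-unwind fix above, not nfsd code */
#include <stdio.h>

static int users;

static int init_a(void)      { return -1; }  /* pretend the first step fails */
static int init_b(void)      { return 0;  }
static void teardown_a(void) { puts("tearing down step A"); }

static int startup(void)
{
	int ret;

	users++;                         /* accounted before any init step    */

	ret = init_a();
	if (ret)
		goto dec_users;          /* the old "return ret" leaked users */

	ret = init_b();
	if (ret)
		goto undo_a;
	return 0;

undo_a:
	teardown_a();
dec_users:
	users--;
	return ret;
}

int main(void)
{
	startup();
	printf("users = %d (balanced even on failure)\n", users);
	return 0;
}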
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 6dfd64b3a604..e973540cd15b 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -17,6 +17,7 @@
+ {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++ {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+@@ -164,8 +165,11 @@
+ {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++ {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++ {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++ {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+@@ -175,6 +179,8 @@
+ {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++ {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++ {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+@@ -297,6 +303,7 @@
+ {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++ {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index d5b50a19463c..0dae71e9971c 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -159,7 +159,11 @@ typedef struct journal_header_s
+ * journal_block_tag (in the descriptor). The other h_chksum* fields are
+ * not used.
+ *
+- * Checksum v1 and v2 are mutually exclusive features.
++ * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
++ * journal_block_tag3_t to store a full 32-bit checksum. Everything else
++ * is the same as v2.
++ *
++ * Checksum v1, v2, and v3 are mutually exclusive features.
+ */
+ struct commit_header {
+ __be32 h_magic;
+@@ -179,6 +183,14 @@ struct commit_header {
+ * raw struct shouldn't be used for pointer math or sizeof() - use
+ * journal_tag_bytes(journal) instead to compute this.
+ */
++typedef struct journal_block_tag3_s
++{
++ __be32 t_blocknr; /* The on-disk block number */
++ __be32 t_flags; /* See below */
++ __be32 t_blocknr_high; /* most-significant high 32bits. */
++ __be32 t_checksum; /* crc32c(uuid+seq+block) */
++} journal_block_tag3_t;
++
+ typedef struct journal_block_tag_s
+ {
+ __be32 t_blocknr; /* The on-disk block number */
+@@ -187,9 +199,6 @@ typedef struct journal_block_tag_s
+ __be32 t_blocknr_high; /* most-significant high 32bits. */
+ } journal_block_tag_t;
+
+-#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
+-#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
+-
+ /* Tail of descriptor block, for checksumming */
+ struct jbd2_journal_block_tail {
+ __be32 t_checksum; /* crc32c(uuid+descr_block) */
+@@ -284,6 +293,7 @@ typedef struct journal_superblock_s
+ #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002
+ #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
+ #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
++#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
+
+ /* Features known to this kernel version: */
+ #define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM
+@@ -291,7 +301,8 @@ typedef struct journal_superblock_s
+ #define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \
+ JBD2_FEATURE_INCOMPAT_64BIT | \
+ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
+- JBD2_FEATURE_INCOMPAT_CSUM_V2)
++ JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
++ JBD2_FEATURE_INCOMPAT_CSUM_V3)
+
+ #ifdef __KERNEL__
+
+@@ -1296,6 +1307,15 @@ static inline int tid_geq(tid_t x, tid_t y)
+ extern int jbd2_journal_blocks_per_page(struct inode *inode);
+ extern size_t journal_tag_bytes(journal_t *journal);
+
++static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
++{
++ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
++ JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++ return 1;
++
++ return 0;
++}
++
+ /*
+ * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
+ * transaction control blocks.
+diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
+index 7235040a19b2..5d9d6f84b382 100644
+--- a/include/linux/sunrpc/svc_xprt.h
++++ b/include/linux/sunrpc/svc_xprt.h
+@@ -33,6 +33,7 @@ struct svc_xprt_class {
+ struct svc_xprt_ops *xcl_ops;
+ struct list_head xcl_list;
+ u32 xcl_max_payload;
++ int xcl_ident;
+ };
+
+ /*
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index bc1638b33449..0acf96b790c5 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3558,9 +3558,10 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
+ };
+
+ /*
+- * Fixup the legacy SCHED_RESET_ON_FORK hack
++ * Fixup the legacy SCHED_RESET_ON_FORK hack, except if
++ * the policy=-1 was passed by sched_setparam().
+ */
+- if (policy & SCHED_RESET_ON_FORK) {
++ if ((policy != -1) && (policy & SCHED_RESET_ON_FORK)) {
+ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+ policy &= ~SCHED_RESET_ON_FORK;
+ attr.sched_policy = policy;
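The sched change above exists because sched_setparam() calls into this path with policy == -1 to mean "keep the current policy"; since -1 has every bit set, the old `policy & SCHED_RESET_ON_FORK` test matched spuriously. The short demo below shows the before/after of that test; the SCHED_RESET_ON_FORK value matches the uapi header.

/* policy_sketch.c - why the (policy != -1) test above is needed */
#include <stdio.h>

#define SCHED_RESET_ON_FORK 0x40000000

int main(void)
{
	int policy = -1;   /* "don't change the policy" sentinel */

	printf("old test:   %s\n",
	       (policy & SCHED_RESET_ON_FORK) ? "treated as RESET_ON_FORK (bug)"
					      : "left alone");
	printf("fixed test: %s\n",
	       ((policy != -1) && (policy & SCHED_RESET_ON_FORK))
		       ? "treated as RESET_ON_FORK"
		       : "left alone");
	return 0;
}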
+diff --git a/mm/memory.c b/mm/memory.c
+index 8b44f765b645..0a21f3d162ae 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -751,7 +751,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn = pte_pfn(pte);
+
+ if (HAVE_PTE_SPECIAL) {
+- if (likely(!pte_special(pte) || pte_numa(pte)))
++ if (likely(!pte_special(pte)))
+ goto check_pfn;
+ if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+ return NULL;
+@@ -777,15 +777,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ }
+ }
+
++ if (is_zero_pfn(pfn))
++ return NULL;
+ check_pfn:
+ if (unlikely(pfn > highest_memmap_pfn)) {
+ print_bad_pte(vma, addr, pte, NULL);
+ return NULL;
+ }
+
+- if (is_zero_pfn(pfn))
+- return NULL;
+-
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+ * eg. VDSO mappings can cause them to exist.
+diff --git a/mm/util.c b/mm/util.c
+index d5ea733c5082..33e9f4455800 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -277,17 +277,14 @@ pid_t vm_is_stack(struct task_struct *task,
+
+ if (in_group) {
+ struct task_struct *t;
+- rcu_read_lock();
+- if (!pid_alive(task))
+- goto done;
+
+- t = task;
+- do {
++ rcu_read_lock();
++ for_each_thread(task, t) {
+ if (vm_is_stack_for_task(t, vma)) {
+ ret = t->pid;
+ goto done;
+ }
+- } while_each_thread(task, t);
++ }
+ done:
+ rcu_read_unlock();
+ }
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index b507cd327d9b..b2437ee93657 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -692,6 +692,7 @@ static struct svc_xprt_class svc_udp_class = {
+ .xcl_owner = THIS_MODULE,
+ .xcl_ops = &svc_udp_ops,
+ .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
++ .xcl_ident = XPRT_TRANSPORT_UDP,
+ };
+
+ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
+@@ -1292,6 +1293,7 @@ static struct svc_xprt_class svc_tcp_class = {
+ .xcl_owner = THIS_MODULE,
+ .xcl_ops = &svc_tcp_ops,
+ .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
++ .xcl_ident = XPRT_TRANSPORT_TCP,
+ };
+
+ void svc_init_xprt_sock(void)
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index c3b2b3369e52..51c63165073c 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1306,7 +1306,7 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
+ }
+ }
+ spin_unlock(&xprt_list_lock);
+- printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
++ dprintk("RPC: transport (%d) not supported\n", args->ident);
+ return ERR_PTR(-EIO);
+
+ found:
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index e7323fbbd348..06a5d9235107 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -92,6 +92,7 @@ struct svc_xprt_class svc_rdma_class = {
+ .xcl_owner = THIS_MODULE,
+ .xcl_ops = &svc_rdma_ops,
+ .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
++ .xcl_ident = XPRT_TRANSPORT_RDMA,
+ };
+
+ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
+diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
+index 3a3a3a71088b..50dd0086cfb1 100644
+--- a/sound/pci/Kconfig
++++ b/sound/pci/Kconfig
+@@ -858,8 +858,8 @@ config SND_VIRTUOSO
+ select SND_JACK if INPUT=y || INPUT=SND
+ help
+ Say Y here to include support for sound cards based on the
+- Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS,
+- Essence ST (Deluxe), and Essence STX.
++ Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS, DSX,
++ Essence ST (Deluxe), and Essence STX (II).
+ Support for the HDAV1.3 (Deluxe) and HDAV1.3 Slim is experimental;
+ for the Xense, missing.
+
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 092f2bd030bd..b686aca7f000 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4376,6 +4376,9 @@ static void ca0132_download_dsp(struct hda_codec *codec)
+ return; /* NOP */
+ #endif
+
++ if (spec->dsp_state == DSP_DOWNLOAD_FAILED)
++ return; /* don't retry failures */
++
+ chipio_enable_clocks(codec);
+ spec->dsp_state = DSP_DOWNLOADING;
+ if (!ca0132_download_dsp_images(codec))
+@@ -4552,7 +4555,8 @@ static int ca0132_init(struct hda_codec *codec)
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int i;
+
+- spec->dsp_state = DSP_DOWNLOAD_INIT;
++ if (spec->dsp_state != DSP_DOWNLOAD_FAILED)
++ spec->dsp_state = DSP_DOWNLOAD_INIT;
+ spec->curr_chip_addx = INVALID_CHIP_ADDRESS;
+
+ snd_hda_power_up(codec);
+@@ -4663,6 +4667,7 @@ static int patch_ca0132(struct hda_codec *codec)
+ codec->spec = spec;
+ spec->codec = codec;
+
++ spec->dsp_state = DSP_DOWNLOAD_INIT;
+ spec->num_mixers = 1;
+ spec->mixers[0] = ca0132_mixer;
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b60824e90408..25728aaacc26 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -180,6 +180,8 @@ static void alc_fix_pll(struct hda_codec *codec)
+ spec->pll_coef_idx);
+ val = snd_hda_codec_read(codec, spec->pll_nid, 0,
+ AC_VERB_GET_PROC_COEF, 0);
++ if (val == -1)
++ return;
+ snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX,
+ spec->pll_coef_idx);
+ snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_PROC_COEF,
+@@ -2784,6 +2786,8 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
+ static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
+ {
+ int val = alc_read_coef_idx(codec, 0x04);
++ if (val == -1)
++ return;
+ if (power_up)
+ val |= 1 << 11;
+ else
+@@ -3242,6 +3246,15 @@ static int alc269_resume(struct hda_codec *codec)
+ snd_hda_codec_resume_cache(codec);
+ alc_inv_dmic_sync(codec, true);
+ hda_call_check_power_status(codec, 0x01);
++
++	/* On some machines the BIOS clears the codec GPIO data when
++	 * entering suspend and does not restore it after resume, so we
++	 * restore it in the driver.
++	 */
++ if (spec->gpio_led)
++ snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_SET_GPIO_DATA,
++ spec->gpio_led);
++
+ if (spec->has_alc5505_dsp)
+ alc5505_dsp_resume(codec);
+
+@@ -4782,6 +4795,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
+ /* ALC282 */
++ SND_PCI_QUIRK(0x103c, 0x2191, "HP Touchsmart 14", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++ SND_PCI_QUIRK(0x103c, 0x2192, "HP Touchsmart 15", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x220d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x220e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+@@ -5122,27 +5137,30 @@ static void alc269_fill_coef(struct hda_codec *codec)
+ if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
+ val = alc_read_coef_idx(codec, 0x04);
+ /* Power up output pin */
+- alc_write_coef_idx(codec, 0x04, val | (1<<11));
++ if (val != -1)
++ alc_write_coef_idx(codec, 0x04, val | (1<<11));
+ }
+
+ if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
+ val = alc_read_coef_idx(codec, 0xd);
+- if ((val & 0x0c00) >> 10 != 0x1) {
++ if (val != -1 && (val & 0x0c00) >> 10 != 0x1) {
+ /* Capless ramp up clock control */
+ alc_write_coef_idx(codec, 0xd, val | (1<<10));
+ }
+ val = alc_read_coef_idx(codec, 0x17);
+- if ((val & 0x01c0) >> 6 != 0x4) {
++ if (val != -1 && (val & 0x01c0) >> 6 != 0x4) {
+ /* Class D power on reset */
+ alc_write_coef_idx(codec, 0x17, val | (1<<7));
+ }
+ }
+
+ val = alc_read_coef_idx(codec, 0xd); /* Class D */
+- alc_write_coef_idx(codec, 0xd, val | (1<<14));
++ if (val != -1)
++ alc_write_coef_idx(codec, 0xd, val | (1<<14));
+
+ val = alc_read_coef_idx(codec, 0x4); /* HP */
+- alc_write_coef_idx(codec, 0x4, val | (1<<11));
++ if (val != -1)
++ alc_write_coef_idx(codec, 0x4, val | (1<<11));
+ }
+
+ /*
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 3744ea4e843d..4d3a3b932690 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -84,6 +84,7 @@ enum {
+ STAC_DELL_EQ,
+ STAC_ALIENWARE_M17X,
+ STAC_92HD89XX_HP_FRONT_JACK,
++ STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
+ STAC_92HD73XX_MODELS
+ };
+
+@@ -1809,6 +1810,11 @@ static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = {
+ {}
+ };
+
++static const struct hda_pintbl stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs[] = {
++ { 0x0e, 0x400000f0 },
++ {}
++};
++
+ static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -1931,6 +1937,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
+ [STAC_92HD89XX_HP_FRONT_JACK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = stac92hd89xx_hp_front_jack_pin_configs,
++ },
++ [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
+ }
+ };
+
+@@ -1991,6 +2001,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
+ "Alienware M17x", STAC_ALIENWARE_M17X),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
+ "Alienware M17x R3", STAC_DELL_EQ),
++ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927,
++ "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
+ "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
+ {} /* terminator */
+diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
+index 64b9fda5f04a..dbbbacfd535e 100644
+--- a/sound/pci/oxygen/virtuoso.c
++++ b/sound/pci/oxygen/virtuoso.c
+@@ -53,6 +53,7 @@ static DEFINE_PCI_DEVICE_TABLE(xonar_ids) = {
+ { OXYGEN_PCI_SUBID(0x1043, 0x835e) },
+ { OXYGEN_PCI_SUBID(0x1043, 0x838e) },
+ { OXYGEN_PCI_SUBID(0x1043, 0x8522) },
++ { OXYGEN_PCI_SUBID(0x1043, 0x85f4) },
+ { OXYGEN_PCI_SUBID_BROKEN_EEPROM },
+ { }
+ };
+diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c
+index c8c7f2c9b355..e02605931669 100644
+--- a/sound/pci/oxygen/xonar_pcm179x.c
++++ b/sound/pci/oxygen/xonar_pcm179x.c
+@@ -100,8 +100,8 @@
+ */
+
+ /*
+- * Xonar Essence ST (Deluxe)/STX
+- * -----------------------------
++ * Xonar Essence ST (Deluxe)/STX (II)
++ * ----------------------------------
+ *
+ * CMI8788:
+ *
+@@ -1138,6 +1138,14 @@ int get_xonar_pcm179x_model(struct oxygen *chip,
+ chip->model.resume = xonar_stx_resume;
+ chip->model.set_dac_params = set_pcm1796_params;
+ break;
++ case 0x85f4:
++ chip->model = model_xonar_st;
++ /* TODO: daughterboard support */
++ chip->model.shortname = "Xonar STX II";
++ chip->model.init = xonar_stx_init;
++ chip->model.resume = xonar_stx_resume;
++ chip->model.set_dac_params = set_pcm1796_params;
++ break;
+ default:
+ return -EINVAL;
+ }
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index f652b10ce905..223c47b33ba3 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -1581,6 +1581,35 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ },
+ {
++ /* BOSS ME-25 */
++ USB_DEVICE(0x0582, 0x0113),
++ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_COMPOSITE,
++ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 0,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ },
++ {
++ .ifnum = 1,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
++ },
++ {
++ .ifnum = 2,
++ .type = QUIRK_MIDI_FIXED_ENDPOINT,
++ .data = & (const struct snd_usb_midi_endpoint_info) {
++ .out_cables = 0x0001,
++ .in_cables = 0x0001
++ }
++ },
++ {
++ .ifnum = -1
++ }
++ }
++ }
++},
++{
+ /* only 44.1 kHz works at the moment */
+ USB_DEVICE(0x0582, 0x0120),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 7c57f2268dd7..19a921eb75f1 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -670,7 +670,7 @@ static int snd_usb_gamecon780_boot_quirk(struct usb_device *dev)
+ /* set the initial volume and don't change; other values are either
+ * too loud or silent due to firmware bug (bko#65251)
+ */
+- u8 buf[2] = { 0x74, 0xdc };
++ u8 buf[2] = { 0x74, 0xe3 };
+ return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+ UAC_FU_VOLUME << 8, 9 << 8, buf, 2);
+diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
+index 2458a1dc2ba9..e8ce34c9db32 100644
+--- a/virt/kvm/ioapic.c
++++ b/virt/kvm/ioapic.c
+@@ -254,10 +254,9 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
+ spin_lock(&ioapic->lock);
+ for (index = 0; index < IOAPIC_NUM_PINS; index++) {
+ e = &ioapic->redirtbl[index];
+- if (!e->fields.mask &&
+- (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+- kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
+- index) || index == RTC_GSI)) {
++ if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
++ kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
++ index == RTC_GSI) {
+ if (kvm_apic_match_dest(vcpu, NULL, 0,
+ e->fields.dest_id, e->fields.dest_mode)) {
+ __set_bit(e->fields.vector,
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index 0df7d4b34dfe..714b94932312 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+ return pfn;
+ }
+
++static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
++{
++ unsigned long i;
++
++ for (i = 0; i < npages; ++i)
++ kvm_release_pfn_clean(pfn + i);
++}
++
+ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ {
+ gfn_t gfn, end_gfn;
+@@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ if (r) {
+ printk(KERN_ERR "kvm_iommu_map_address:"
+ "iommu failed to map pfn=%llx\n", pfn);
++ kvm_unpin_pages(kvm, pfn, page_size);
+ goto unmap_pages;
+ }
+
+@@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ return 0;
+
+ unmap_pages:
+- kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
++ kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
+ return r;
+ }
+
+@@ -266,14 +275,6 @@ out_unlock:
+ return r;
+ }
+
+-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+-{
+- unsigned long i;
+-
+- for (i = 0; i < npages; ++i)
+- kvm_release_pfn_clean(pfn + i);
+-}
+-
+ static void kvm_iommu_put_pages(struct kvm *kvm,
+ gfn_t base_gfn, unsigned long npages)
+ {
diff --git a/patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch b/patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch
deleted file mode 100644
index 70b570ea4c..0000000000
--- a/patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Filipe Manana <fdmanana@suse.com>
-Date: Wed, 2 Jul 2014 20:07:54 +0100
-Patch-mainline: 3.17
-Git-commit: 6f7ff6d7832c6be13e8c95598884dbc40ad69fb7
-Subject: [PATCH] Btrfs: read lock extent buffer while walking backrefs
-
-Before processing the extent buffer, acquire a read lock on it, so
-that we're safe against concurrent updates on the extent buffer.
-
-Signed-off-by: Filipe Manana <fdmanana@suse.com>
-Signed-off-by: Chris Mason <clm@fb.com>
-Signed-off-by: David Sterba <dsterba@suse.cz>
----
- fs/btrfs/backref.c | 3 +++
- 1 file changed, 3 insertions(+)
-
---- a/fs/btrfs/backref.c
-+++ b/fs/btrfs/backref.c
-@@ -1001,8 +1001,11 @@ again:
- ret = -EIO;
- goto out;
- }
-+ btrfs_tree_read_lock(eb);
-+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
- ret = find_extent_in_eb(eb, bytenr,
- *extent_item_pos, &eie);
-+ btrfs_tree_read_unlock_blocking(eb);
- free_extent_buffer(eb);
- if (ret < 0)
- goto out;
diff --git a/patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch b/patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch
deleted file mode 100644
index c4e141f246..0000000000
--- a/patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From: Liu Bo <bo.li.liu@oracle.com>
-Date: Thu, 24 Jul 2014 22:48:05 +0800
-Patch-mainline: 3.17
-Git-commit: ce62003f690dff38d3164a632ec69efa15c32cbf
-Subject: [PATCH] Btrfs: fix compressed write corruption on enospc
-
-When failing to allocate space for the whole compressed extent, we'll
-fallback to uncompressed IO, but we've forgotten to redirty the pages
-which belong to this compressed extent, and these 'clean' pages will
-simply skip 'submit' part and go to endio directly, at last we got data
-corruption as we write nothing.
-
-Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
-Tested-By: Martin Steigerwald <martin@lichtvoll.de>
-Signed-off-by: Chris Mason <clm@fb.com>
-Signed-off-by: David Sterba <dsterba@suse.cz>
----
- fs/btrfs/inode.c | 12 ++++++++++++
- 1 file changed, 12 insertions(+)
-
---- a/fs/btrfs/inode.c
-+++ b/fs/btrfs/inode.c
-@@ -709,6 +709,18 @@ retry:
- unlock_extent(io_tree, async_extent->start,
- async_extent->start +
- async_extent->ram_size - 1);
-+
-+ /*
-+ * we need to redirty the pages if we decide to
-+ * fallback to uncompressed IO, otherwise we
-+ * will not submit these pages down to lower
-+ * layers.
-+ */
-+ extent_range_redirty_for_io(inode,
-+ async_extent->start,
-+ async_extent->start +
-+ async_extent->ram_size - 1);
-+
- goto retry;
- }
- goto out_free;
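
The corruption mechanism described in the deleted patch above boils down to
writeback only submitting pages that are still dirty. A heavily simplified,
stand-alone model of that behaviour (ordinary userspace C; the real
extent_write_cache_pages() logic is far more involved, so this is only an
illustration of why the redirty step matters):

    #include <stdbool.h>
    #include <stdio.h>

    struct page { bool dirty; };

    /* toy writeback pass: only dirty pages are submitted, clean pages are
     * treated as already written back ("skip the submit part") */
    static int writeback_range(struct page *pages, int n)
    {
        int submitted = 0;
        for (int i = 0; i < n; i++) {
            if (!pages[i].dirty)
                continue;
            pages[i].dirty = false;   /* pretend a bio was issued */
            submitted++;
        }
        return submitted;
    }

    int main(void)
    {
        struct page range[4];
        int i;

        /* the compression setup path has already cleaned the pages ... */
        for (i = 0; i < 4; i++)
            range[i].dirty = false;

        /* ... so retrying as uncompressed IO without redirtying writes nothing */
        printf("without redirty: %d pages submitted\n", writeback_range(range, 4));

        /* the fix: redirty the range (extent_range_redirty_for_io) before the retry */
        for (i = 0; i < 4; i++)
            range[i].dirty = true;
        printf("with redirty:    %d pages submitted\n", writeback_range(range, 4));
        return 0;
    }
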
diff --git a/patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch b/patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch
deleted file mode 100644
index c72d87b47c..0000000000
--- a/patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From: Filipe Manana <fdmanana@suse.com>
-Date: Sat, 9 Aug 2014 21:22:27 +0100
-Patch-mainline: 3.17
-Git-commit: 27b9a8122ff71a8cadfbffb9c4f0694300464f3b
-Subject: [PATCH] Btrfs: fix csum tree corruption, duplicate and outdated
- checksums
-
-Under rare circumstances we can end up leaving 2 versions of a checksum
-for the same file extent range.
-
-The reason for this is that after calling btrfs_next_leaf we process
-slot 0 of the leaf it returns, instead of processing the slot set in
-path->slots[0]. Most of the time (by far) path->slots[0] is 0, but after
-btrfs_next_leaf() releases the path and before it searches for the next
-leaf, another task might cause a split of the next leaf, which migrates
-some of its keys to the leaf we were processing before calling
-btrfs_next_leaf(). In this case btrfs_next_leaf() returns again the
-same leaf but with path->slots[0] having a slot number corresponding
-to the first new key it got, that is, a slot number that didn't exist
-before calling btrfs_next_leaf(), as the leaf now has more keys than
-it had before. So we must really process the returned leaf starting at
-path->slots[0] always, as it isn't always 0, and the key at slot 0 can
-have an offset much lower than our search offset/bytenr.
-
-For example, consider the following scenario, where we have:
-
-sums->bytenr: 40157184, sums->len: 16384, sums end: 40173568
-four 4kb file data blocks with offsets 40157184, 40161280, 40165376, 40169472
-
- Leaf N:
-
- slot = 0 slot = btrfs_header_nritems() - 1
- |-------------------------------------------------------------------|
- | [(CSUM CSUM 39239680), size 8] ... [(CSUM CSUM 40116224), size 4] |
- |-------------------------------------------------------------------|
-
- Leaf N + 1:
-
- slot = 0 slot = btrfs_header_nritems() - 1
- |--------------------------------------------------------------------|
- | [(CSUM CSUM 40161280), size 32] ... [((CSUM CSUM 40615936), size 8 |
- |--------------------------------------------------------------------|
-
-Because we are at the last slot of leaf N, we call btrfs_next_leaf() to
-find the next highest key, which releases the current path and then searches
-for that next key. However, after releasing the path and before finding that
-next key, the item at slot 0 of leaf N + 1 gets moved to leaf N, due to a call
-to ctree.c:push_leaf_left() (via ctree.c:split_leaf()), and therefore
-btrfs_next_leaf() will return us a path again with leaf N but with the slot
-pointing to its new last key (CSUM CSUM 40161280). This new version of leaf N
-is then:
-
- slot = 0 slot = btrfs_header_nritems() - 2 slot = btrfs_header_nritems() - 1
- |----------------------------------------------------------------------------------------------------|
- | [(CSUM CSUM 39239680), size 8] ... [(CSUM CSUM 40116224), size 4] [(CSUM CSUM 40161280), size 32] |
- |----------------------------------------------------------------------------------------------------|
-
-And incorrectly using slot 0 makes us set next_offset to 39239680 and jump
-into the "insert:" label, which will set tmp to:
-
- tmp = min((sums->len - total_bytes) >> blocksize_bits,
- (next_offset - file_key.offset) >> blocksize_bits) =
- min((16384 - 0) >> 12, (39239680 - 40157184) >> 12) =
- min(4, (u64)-917504 = 18446744073708634112 >> 12) = 4
-
-and
-
- ins_size = csum_size * tmp = 4 * 4 = 16 bytes.
-
-In other words, we insert a new csum item in the tree with key
-(CSUM_OBJECTID CSUM_KEY 40157184 = sums->bytenr) that contains the checksums
-for all the data (4 blocks of 4096 bytes each = sums->len). This is wrong,
-because the item with key (CSUM CSUM 40161280) (the one that was moved from
-leaf N + 1 to the end of leaf N) contains the old checksums of the last 12288
-bytes of our data, and those old checksums never get removed.
-
-So this leaves us with 2 different checksums for 3 of the 4kb blocks of data
-in the tree, and breaks the logical rule:
-
- Key_N+1.offset >= Key_N.offset + length_of_data_its_checksums_cover
-
-An obvious bad effect of this is that a subsequent csum tree lookup to get
-the checksum of any of the blocks with logical offset of 40161280, 40165376
-or 40169472 (the last 3 4kb blocks of file data), will get the old checksums.
-
-Cc: stable@vger.kernel.org
-Signed-off-by: Filipe Manana <fdmanana@suse.com>
-Signed-off-by: Chris Mason <clm@fb.com>
-Signed-off-by: David Sterba <dsterba@suse.cz>
----
- fs/btrfs/file-item.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/fs/btrfs/file-item.c
-+++ b/fs/btrfs/file-item.c
-@@ -756,7 +756,7 @@ again:
- found_next = 1;
- if (ret != 0)
- goto insert;
-- slot = 0;
-+ slot = path->slots[0];
- }
- btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
- if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
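
The arithmetic in the deleted patch above is easy to reproduce in plain
userspace C (the numbers are taken from the commit message; this is not kernel
code): because the subtraction is done in u64, a next_offset smaller than the
search offset wraps around to a huge value, so min() still yields 4 blocks and
16 bytes of checksums get inserted over a range that already has a stale csum
item.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t sums_len        = 16384;      /* four 4kb data blocks */
        uint64_t total_bytes     = 0;
        uint64_t next_offset     = 39239680;   /* stale value taken from slot 0 */
        uint64_t file_key_offset = 40157184;   /* sums->bytenr */
        unsigned blocksize_bits  = 12;
        uint64_t csum_size       = 4;

        uint64_t a = (sums_len - total_bytes) >> blocksize_bits;        /* 4 */
        uint64_t b = (next_offset - file_key_offset) >> blocksize_bits; /* wraps to ~4.5e15 */
        uint64_t tmp = a < b ? a : b;                                   /* 4, not 0 */

        printf("a=%llu b=%llu tmp=%llu ins_size=%llu\n",
               (unsigned long long)a, (unsigned long long)b,
               (unsigned long long)tmp, (unsigned long long)(csum_size * tmp));
        return 0;
    }
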
diff --git a/patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch b/patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch
deleted file mode 100644
index 7baa506a64..0000000000
--- a/patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch
+++ /dev/null
@@ -1,567 +0,0 @@
-From: Liu Bo <bo.li.liu@oracle.com>
-Date: Fri, 15 Aug 2014 23:36:53 +0800
-Patch-mainline: 3.17
-Git-commit: 9e0af23764344f7f1b68e4eefbe7dc865018b63d
-Subject: [PATCH] Btrfs: fix task hang under heavy compressed write
-
-This has been reported and discussed for a long time, and this hang occurs in
-both 3.15 and 3.16.
-
-Btrfs now uses the kernel workqueue, but the conversion introduces this hang
-problem.
-
-Btrfs has a kind of work that is queued in an ordered way, which means that its
-ordered_func() must be processed FIFO, so it usually looks like --
-
-normal_work_helper(arg)
- work = container_of(arg, struct btrfs_work, normal_work);
-
- work->func() <---- (we name it work X)
- for ordered_work in wq->ordered_list
- ordered_work->ordered_func()
- ordered_work->ordered_free()
-
-The hang is a rare case. First, when we find free space, we get an uncached
-block group; then we go to read its free space cache inode for free space
-information, so it will
-
-file a readahead request
- btrfs_readpages()
- for page that is not in page cache
- __do_readpage()
- submit_extent_page()
- btrfs_submit_bio_hook()
- btrfs_bio_wq_end_io()
- submit_bio()
- end_workqueue_bio() <--(ret by the 1st endio)
- queue a work(named work Y) for the 2nd
- also the real endio()
-
-So the hang occurs when work Y's work_struct and work X's work_struct happen
-to share the same address.
-
-A bit more explanation,
-
-A,B,C -- struct btrfs_work
-arg -- struct work_struct
-
-kthread:
-worker_thread()
- pick up a work_struct from @worklist
- process_one_work(arg)
- worker->current_work = arg; <-- arg is A->normal_work
- worker->current_func(arg)
- normal_work_helper(arg)
- A = container_of(arg, struct btrfs_work, normal_work);
-
- A->func()
- A->ordered_func()
- A->ordered_free() <-- A gets freed
-
- B->ordered_func()
- submit_compressed_extents()
- find_free_extent()
- load_free_space_inode()
- ... <-- (the above readhead stack)
- end_workqueue_bio()
- btrfs_queue_work(work C)
- B->ordered_free()
-
-If work A has a high priority in wq->ordered_list and there are more ordered
-works queued after it, such as work B, its memory could have been freed
-before normal_work_helper() returns, which means that the kernel workqueue
-code worker_thread() still has its worker->current_work pointer set to work
-A->normal_work's, i.e. arg's, address.
-
-Meanwhile, work C is allocated after work A is freed, so work C->normal_work
-and work A->normal_work are likely to share the same address (I confirmed this
-with ftrace output, so I'm not just guessing; it's rare though).
-
-When another kthread picks up work C->normal_work to process and finds our
-kthread is processing it (see find_worker_executing_work()), it'll treat
-work C as a collision and skip it, which ends up with nobody processing work C.
-
-So the situation is that our kthread is waiting forever on work C.
-
-Besides, there are other cases that can lead to deadlock, but the real problem
-is that all btrfs workqueues share one work->func -- normal_work_helper --
-so this patch makes each workqueue have its own helper function, which is
-only a wrapper of normal_work_helper.
-
-With this patch, I no longer hit the above hang.
-
-Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
-Signed-off-by: Chris Mason <clm@fb.com>
-Signed-off-by: David Sterba <dsterba@suse.cz>
----
- fs/btrfs/async-thread.c | 44 +++++++++++++++++++++++++++++++--------
- fs/btrfs/async-thread.h | 28 +++++++++++++++++++++++-
- fs/btrfs/delayed-inode.c | 4 +--
- fs/btrfs/disk-io.c | 53 +++++++++++++++++++++++++----------------------
- fs/btrfs/extent-tree.c | 7 +++---
- fs/btrfs/inode.c | 35 ++++++++++++++++++++-----------
- fs/btrfs/ordered-data.c | 1
- fs/btrfs/qgroup.c | 1
- fs/btrfs/raid56.c | 9 +++++--
- fs/btrfs/reada.c | 3 +-
- fs/btrfs/scrub.c | 14 +++++++-----
- fs/btrfs/volumes.c | 3 +-
- 12 files changed, 141 insertions(+), 61 deletions(-)
-
---- a/fs/btrfs/async-thread.c
-+++ b/fs/btrfs/async-thread.c
-@@ -22,7 +22,6 @@
- #include <linux/list.h>
- #include <linux/spinlock.h>
- #include <linux/freezer.h>
--#include <linux/workqueue.h>
- #include "async-thread.h"
- #include "ctree.h"
-
-@@ -55,8 +54,39 @@ struct btrfs_workqueue {
- struct __btrfs_workqueue *high;
- };
-
--static inline struct __btrfs_workqueue
--*__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
-+static void normal_work_helper(struct btrfs_work *work);
-+
-+#define BTRFS_WORK_HELPER(name) \
-+void btrfs_##name(struct work_struct *arg) \
-+{ \
-+ struct btrfs_work *work = container_of(arg, struct btrfs_work, \
-+ normal_work); \
-+ normal_work_helper(work); \
-+}
-+
-+BTRFS_WORK_HELPER(worker_helper);
-+BTRFS_WORK_HELPER(delalloc_helper);
-+BTRFS_WORK_HELPER(flush_delalloc_helper);
-+BTRFS_WORK_HELPER(cache_helper);
-+BTRFS_WORK_HELPER(submit_helper);
-+BTRFS_WORK_HELPER(fixup_helper);
-+BTRFS_WORK_HELPER(endio_helper);
-+BTRFS_WORK_HELPER(endio_meta_helper);
-+BTRFS_WORK_HELPER(endio_meta_write_helper);
-+BTRFS_WORK_HELPER(endio_raid56_helper);
-+BTRFS_WORK_HELPER(rmw_helper);
-+BTRFS_WORK_HELPER(endio_write_helper);
-+BTRFS_WORK_HELPER(freespace_write_helper);
-+BTRFS_WORK_HELPER(delayed_meta_helper);
-+BTRFS_WORK_HELPER(readahead_helper);
-+BTRFS_WORK_HELPER(qgroup_rescan_helper);
-+BTRFS_WORK_HELPER(extent_refs_helper);
-+BTRFS_WORK_HELPER(scrub_helper);
-+BTRFS_WORK_HELPER(scrubwrc_helper);
-+BTRFS_WORK_HELPER(scrubnc_helper);
-+
-+static struct __btrfs_workqueue *
-+__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
- int thresh)
- {
- struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
-@@ -232,13 +262,11 @@ static void run_ordered_work(struct __bt
- spin_unlock_irqrestore(lock, flags);
- }
-
--static void normal_work_helper(struct work_struct *arg)
-+static void normal_work_helper(struct btrfs_work *work)
- {
-- struct btrfs_work *work;
- struct __btrfs_workqueue *wq;
- int need_order = 0;
-
-- work = container_of(arg, struct btrfs_work, normal_work);
- /*
- * We should not touch things inside work in the following cases:
- * 1) after work->func() if it has no ordered_free
-@@ -262,7 +290,7 @@ static void normal_work_helper(struct wo
- trace_btrfs_all_work_done(work);
- }
-
--void btrfs_init_work(struct btrfs_work *work,
-+void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
- btrfs_func_t func,
- btrfs_func_t ordered_func,
- btrfs_func_t ordered_free)
-@@ -270,7 +298,7 @@ void btrfs_init_work(struct btrfs_work *
- work->func = func;
- work->ordered_func = ordered_func;
- work->ordered_free = ordered_free;
-- INIT_WORK(&work->normal_work, normal_work_helper);
-+ INIT_WORK(&work->normal_work, uniq_func);
- INIT_LIST_HEAD(&work->ordered_list);
- work->flags = 0;
- }
---- a/fs/btrfs/async-thread.h
-+++ b/fs/btrfs/async-thread.h
-@@ -19,12 +19,14 @@
-
- #ifndef __BTRFS_ASYNC_THREAD_
- #define __BTRFS_ASYNC_THREAD_
-+#include <linux/workqueue.h>
-
- struct btrfs_workqueue;
- /* Internal use only */
- struct __btrfs_workqueue;
- struct btrfs_work;
- typedef void (*btrfs_func_t)(struct btrfs_work *arg);
-+typedef void (*btrfs_work_func_t)(struct work_struct *arg);
-
- struct btrfs_work {
- btrfs_func_t func;
-@@ -38,11 +40,35 @@ struct btrfs_work {
- unsigned long flags;
- };
-
-+#define BTRFS_WORK_HELPER_PROTO(name) \
-+void btrfs_##name(struct work_struct *arg)
-+
-+BTRFS_WORK_HELPER_PROTO(worker_helper);
-+BTRFS_WORK_HELPER_PROTO(delalloc_helper);
-+BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper);
-+BTRFS_WORK_HELPER_PROTO(cache_helper);
-+BTRFS_WORK_HELPER_PROTO(submit_helper);
-+BTRFS_WORK_HELPER_PROTO(fixup_helper);
-+BTRFS_WORK_HELPER_PROTO(endio_helper);
-+BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
-+BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
-+BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
-+BTRFS_WORK_HELPER_PROTO(rmw_helper);
-+BTRFS_WORK_HELPER_PROTO(endio_write_helper);
-+BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
-+BTRFS_WORK_HELPER_PROTO(delayed_meta_helper);
-+BTRFS_WORK_HELPER_PROTO(readahead_helper);
-+BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper);
-+BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
-+BTRFS_WORK_HELPER_PROTO(scrub_helper);
-+BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
-+BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
-+
- struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
- int flags,
- int max_active,
- int thresh);
--void btrfs_init_work(struct btrfs_work *work,
-+void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
- btrfs_func_t func,
- btrfs_func_t ordered_func,
- btrfs_func_t ordered_free);
---- a/fs/btrfs/delayed-inode.c
-+++ b/fs/btrfs/delayed-inode.c
-@@ -1395,8 +1395,8 @@ static int btrfs_wq_run_delayed_node(str
- return -ENOMEM;
-
- async_work->delayed_root = delayed_root;
-- btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root,
-- NULL, NULL);
-+ btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
-+ btrfs_async_run_delayed_root, NULL, NULL);
- async_work->nr = nr;
-
- btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work);
---- a/fs/btrfs/disk-io.c
-+++ b/fs/btrfs/disk-io.c
-@@ -39,7 +39,6 @@
- #include "btrfs_inode.h"
- #include "volumes.h"
- #include "print-tree.h"
--#include "async-thread.h"
- #include "locking.h"
- #include "tree-log.h"
- #include "free-space-cache.h"
-@@ -695,35 +694,41 @@ static void end_workqueue_bio(struct bio
- {
- struct end_io_wq *end_io_wq = bio->bi_private;
- struct btrfs_fs_info *fs_info;
-+ struct btrfs_workqueue *wq;
-+ btrfs_work_func_t func;
-
- fs_info = end_io_wq->info;
- end_io_wq->error = err;
-- btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
-
- if (bio->bi_rw & REQ_WRITE) {
-- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
-- btrfs_queue_work(fs_info->endio_meta_write_workers,
-- &end_io_wq->work);
-- else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
-- btrfs_queue_work(fs_info->endio_freespace_worker,
-- &end_io_wq->work);
-- else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-- btrfs_queue_work(fs_info->endio_raid56_workers,
-- &end_io_wq->work);
-- else
-- btrfs_queue_work(fs_info->endio_write_workers,
-- &end_io_wq->work);
-+ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
-+ wq = fs_info->endio_meta_write_workers;
-+ func = btrfs_endio_meta_write_helper;
-+ } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
-+ wq = fs_info->endio_freespace_worker;
-+ func = btrfs_freespace_write_helper;
-+ } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
-+ wq = fs_info->endio_raid56_workers;
-+ func = btrfs_endio_raid56_helper;
-+ } else {
-+ wq = fs_info->endio_write_workers;
-+ func = btrfs_endio_write_helper;
-+ }
- } else {
-- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-- btrfs_queue_work(fs_info->endio_raid56_workers,
-- &end_io_wq->work);
-- else if (end_io_wq->metadata)
-- btrfs_queue_work(fs_info->endio_meta_workers,
-- &end_io_wq->work);
-- else
-- btrfs_queue_work(fs_info->endio_workers,
-- &end_io_wq->work);
-+ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
-+ wq = fs_info->endio_raid56_workers;
-+ func = btrfs_endio_raid56_helper;
-+ } else if (end_io_wq->metadata) {
-+ wq = fs_info->endio_meta_workers;
-+ func = btrfs_endio_meta_helper;
-+ } else {
-+ wq = fs_info->endio_workers;
-+ func = btrfs_endio_helper;
-+ }
- }
-+
-+ btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
-+ btrfs_queue_work(wq, &end_io_wq->work);
- }
-
- /*
-@@ -830,7 +835,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_
- async->submit_bio_start = submit_bio_start;
- async->submit_bio_done = submit_bio_done;
-
-- btrfs_init_work(&async->work, run_one_async_start,
-+ btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
- run_one_async_done, run_one_async_free);
-
- async->bio_flags = bio_flags;
---- a/fs/btrfs/extent-tree.c
-+++ b/fs/btrfs/extent-tree.c
-@@ -552,7 +552,8 @@ static int cache_block_group(struct btrf
- caching_ctl->block_group = cache;
- caching_ctl->progress = cache->key.objectid;
- atomic_set(&caching_ctl->count, 1);
-- btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
-+ btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
-+ caching_thread, NULL, NULL);
-
- spin_lock(&cache->lock);
- /*
-@@ -2749,8 +2750,8 @@ int btrfs_async_run_delayed_refs(struct
- async->sync = 0;
- init_completion(&async->wait);
-
-- btrfs_init_work(&async->work, delayed_ref_async_start,
-- NULL, NULL);
-+ btrfs_init_work(&async->work, btrfs_extent_refs_helper,
-+ delayed_ref_async_start, NULL, NULL);
-
- btrfs_queue_work(root->fs_info->extent_workers, &async->work);
-
---- a/fs/btrfs/inode.c
-+++ b/fs/btrfs/inode.c
-@@ -1096,8 +1096,10 @@ static int cow_file_range_async(struct i
- async_cow->end = cur_end;
- INIT_LIST_HEAD(&async_cow->extents);
-
-- btrfs_init_work(&async_cow->work, async_cow_start,
-- async_cow_submit, async_cow_free);
-+ btrfs_init_work(&async_cow->work,
-+ btrfs_delalloc_helper,
-+ async_cow_start, async_cow_submit,
-+ async_cow_free);
-
- nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
- PAGE_CACHE_SHIFT;
-@@ -1881,7 +1883,8 @@ static int btrfs_writepage_start_hook(st
-
- SetPageChecked(page);
- page_cache_get(page);
-- btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
-+ btrfs_init_work(&fixup->work, btrfs_fixup_helper,
-+ btrfs_writepage_fixup_worker, NULL, NULL);
- fixup->page = page;
- btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
- return -EBUSY;
-@@ -2822,7 +2825,8 @@ static int btrfs_writepage_end_io_hook(s
- struct inode *inode = page->mapping->host;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_ordered_extent *ordered_extent = NULL;
-- struct btrfs_workqueue *workers;
-+ struct btrfs_workqueue *wq;
-+ btrfs_work_func_t func;
-
- trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
-
-@@ -2831,13 +2835,17 @@ static int btrfs_writepage_end_io_hook(s
- end - start + 1, uptodate))
- return 0;
-
-- btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL);
-+ if (btrfs_is_free_space_inode(inode)) {
-+ wq = root->fs_info->endio_freespace_worker;
-+ func = btrfs_freespace_write_helper;
-+ } else {
-+ wq = root->fs_info->endio_write_workers;
-+ func = btrfs_endio_write_helper;
-+ }
-
-- if (btrfs_is_free_space_inode(inode))
-- workers = root->fs_info->endio_freespace_worker;
-- else
-- workers = root->fs_info->endio_write_workers;
-- btrfs_queue_work(workers, &ordered_extent->work);
-+ btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
-+ NULL);
-+ btrfs_queue_work(wq, &ordered_extent->work);
-
- return 0;
- }
-@@ -7173,7 +7181,8 @@ again:
- if (!ret)
- goto out_test;
-
-- btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL);
-+ btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
-+ finish_ordered_fn, NULL, NULL);
- btrfs_queue_work(root->fs_info->endio_write_workers,
- &ordered->work);
- out_test:
-@@ -8542,7 +8551,9 @@ struct btrfs_delalloc_work *btrfs_alloc_
- work->inode = inode;
- work->wait = wait;
- work->delay_iput = delay_iput;
-- btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
-+ WARN_ON_ONCE(!inode);
-+ btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
-+ btrfs_run_delalloc_work, NULL, NULL);
-
- return work;
- }
---- a/fs/btrfs/ordered-data.c
-+++ b/fs/btrfs/ordered-data.c
-@@ -627,6 +627,7 @@ int btrfs_wait_ordered_extents(struct bt
- spin_unlock(&root->ordered_extent_lock);
-
- btrfs_init_work(&ordered->flush_work,
-+ btrfs_flush_delalloc_helper,
- btrfs_run_ordered_extent_work, NULL, NULL);
- list_add_tail(&ordered->work_list, &works);
- btrfs_queue_work(root->fs_info->flush_workers,
---- a/fs/btrfs/qgroup.c
-+++ b/fs/btrfs/qgroup.c
-@@ -2551,6 +2551,7 @@ qgroup_rescan_init(struct btrfs_fs_info
- memset(&fs_info->qgroup_rescan_work, 0,
- sizeof(fs_info->qgroup_rescan_work));
- btrfs_init_work(&fs_info->qgroup_rescan_work,
-+ btrfs_qgroup_rescan_helper,
- btrfs_qgroup_rescan_worker, NULL, NULL);
-
- if (ret) {
---- a/fs/btrfs/raid56.c
-+++ b/fs/btrfs/raid56.c
-@@ -1416,7 +1416,8 @@ cleanup:
-
- static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
- {
-- btrfs_init_work(&rbio->work, rmw_work, NULL, NULL);
-+ btrfs_init_work(&rbio->work, btrfs_rmw_helper,
-+ rmw_work, NULL, NULL);
-
- btrfs_queue_work(rbio->fs_info->rmw_workers,
- &rbio->work);
-@@ -1424,7 +1425,8 @@ static void async_rmw_stripe(struct btrf
-
- static void async_read_rebuild(struct btrfs_raid_bio *rbio)
- {
-- btrfs_init_work(&rbio->work, read_rebuild_work, NULL, NULL);
-+ btrfs_init_work(&rbio->work, btrfs_rmw_helper,
-+ read_rebuild_work, NULL, NULL);
-
- btrfs_queue_work(rbio->fs_info->rmw_workers,
- &rbio->work);
-@@ -1665,7 +1667,8 @@ static void btrfs_raid_unplug(struct blk
- plug = container_of(cb, struct btrfs_plug_cb, cb);
-
- if (from_schedule) {
-- btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
-+ btrfs_init_work(&plug->work, btrfs_rmw_helper,
-+ unplug_work, NULL, NULL);
- btrfs_queue_work(plug->info->rmw_workers,
- &plug->work);
- return;
---- a/fs/btrfs/reada.c
-+++ b/fs/btrfs/reada.c
-@@ -798,7 +798,8 @@ static void reada_start_machine(struct b
- /* FIXME we cannot handle this properly right now */
- BUG();
- }
-- btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
-+ btrfs_init_work(&rmw->work, btrfs_readahead_helper,
-+ reada_start_machine_worker, NULL, NULL);
- rmw->fs_info = fs_info;
-
- btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
---- a/fs/btrfs/scrub.c
-+++ b/fs/btrfs/scrub.c
-@@ -428,8 +428,8 @@ struct scrub_ctx *scrub_setup_ctx(struct
- sbio->index = i;
- sbio->sctx = sctx;
- sbio->page_count = 0;
-- btrfs_init_work(&sbio->work, scrub_bio_end_io_worker,
-- NULL, NULL);
-+ btrfs_init_work(&sbio->work, btrfs_scrub_helper,
-+ scrub_bio_end_io_worker, NULL, NULL);
-
- if (i != SCRUB_BIOS_PER_SCTX - 1)
- sctx->bios[i]->next_free = i + 1;
-@@ -999,8 +999,8 @@ nodatasum_case:
- fixup_nodatasum->root = fs_info->extent_root;
- fixup_nodatasum->mirror_num = failed_mirror_index + 1;
- scrub_pending_trans_workers_inc(sctx);
-- btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum,
-- NULL, NULL);
-+ btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
-+ scrub_fixup_nodatasum, NULL, NULL);
- btrfs_queue_work(fs_info->scrub_workers,
- &fixup_nodatasum->work);
- goto out;
-@@ -1616,7 +1616,8 @@ static void scrub_wr_bio_end_io(struct b
- sbio->err = err;
- sbio->bio = bio;
-
-- btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
-+ btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
-+ scrub_wr_bio_end_io_worker, NULL, NULL);
- btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
- }
-
-@@ -3203,7 +3204,8 @@ static int copy_nocow_pages(struct scrub
- nocow_ctx->len = len;
- nocow_ctx->mirror_num = mirror_num;
- nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
-- btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL);
-+ btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
-+ copy_nocow_pages_worker, NULL, NULL);
- INIT_LIST_HEAD(&nocow_ctx->inodes);
- btrfs_queue_work(fs_info->scrub_nocow_workers,
- &nocow_ctx->work);
---- a/fs/btrfs/volumes.c
-+++ b/fs/btrfs/volumes.c
-@@ -5800,7 +5800,8 @@ struct btrfs_device *btrfs_alloc_device(
- else
- generate_random_uuid(dev->uuid);
-
-- btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL);
-+ btrfs_init_work(&dev->work, btrfs_submit_helper,
-+ pending_bios_fn, NULL, NULL);
-
- return dev;
- }
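
The reason per-workqueue helpers fix the hang described in the deleted patch
above is that the kernel workqueue code treats a work item as "already running"
only when both the work_struct address and its callback match the worker's
recorded ones (see find_worker_executing_work()). A heavily simplified,
stand-alone sketch of that test -- a toy model, not the real implementation:

    #include <stdbool.h>
    #include <stdio.h>

    struct work_struct { void (*func)(struct work_struct *); };

    struct worker {
        struct work_struct *current_work;             /* address being processed */
        void (*current_func)(struct work_struct *);   /* its callback at that time */
    };

    /* toy stand-in for find_worker_executing_work(): a match on both the
     * address and the callback means "this work is already running here" */
    static bool already_running(const struct worker *w,
                                const struct work_struct *incoming)
    {
        return w->current_work == incoming && w->current_func == incoming->func;
    }

    static void normal_work_helper(struct work_struct *w) { (void)w; } /* old shared helper */
    static void btrfs_endio_helper(struct work_struct *w) { (void)w; } /* per-queue wrapper */

    int main(void)
    {
        struct work_struct slot;   /* one allocation: work A, freed, reused by work C */
        struct worker kthread;

        slot.func = normal_work_helper;   /* work A, as the worker recorded it */
        kthread.current_work = &slot;
        kthread.current_func = slot.func;

        /* before the patch: work C reuses A's address and the shared helper,
         * so it looks like a duplicate and nobody ever runs it */
        slot.func = normal_work_helper;
        printf("shared helper:    collision=%d\n", already_running(&kthread, &slot));

        /* after the patch: work C carries its own queue's helper, the pair no
         * longer matches, and the work is processed normally */
        slot.func = btrfs_endio_helper;
        printf("per-queue helper: collision=%d\n", already_running(&kthread, &slot));
        return 0;
    }
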
diff --git a/patches.xen/xen-x86-EFI b/patches.xen/xen-x86-EFI
index 248c4945e6..dd9f2783bb 100644
--- a/patches.xen/xen-x86-EFI
+++ b/patches.xen/xen-x86-EFI
@@ -20,9 +20,9 @@ References: fate#311376, fate#311529, bnc#578927, bnc#628554
bool "EFI stub support"
- depends on EFI
+ depends on EFI && !XEN
+ select RELOCATABLE
---help---
This kernel feature allows a bzImage to be loaded directly
- by EFI firmware without the use of a bootloader.
--- head.orig/arch/x86/include/mach-xen/asm/setup.h 2011-02-01 14:54:13.000000000 +0100
+++ head/arch/x86/include/mach-xen/asm/setup.h 2011-07-04 12:32:43.000000000 +0200
@@ -3,6 +3,12 @@
diff --git a/patches.xen/xen-x86-bzImage b/patches.xen/xen-x86-bzImage
index 2820a4c2d7..bb13b9dc18 100644
--- a/patches.xen/xen-x86-bzImage
+++ b/patches.xen/xen-x86-bzImage
@@ -16,9 +16,9 @@ patch (but is probably better kept here).
bool "EFI stub support"
- depends on EFI && !XEN
+ depends on EFI && (!XEN || XEN_BZIMAGE)
+ select RELOCATABLE
---help---
This kernel feature allows a bzImage to be loaded directly
- by EFI firmware without the use of a bootloader.
@@ -1809,6 +1809,34 @@ config PHYSICAL_ALIGN
Don't change this unless you know what you are doing.
diff --git a/patches.xen/xen3-auto-common.diff b/patches.xen/xen3-auto-common.diff
index 8adb3bad9a..4da091316e 100644
--- a/patches.xen/xen3-auto-common.diff
+++ b/patches.xen/xen3-auto-common.diff
@@ -3799,10 +3799,10 @@ take the forward porting patches:
+#endif
+
if (HAVE_PTE_SPECIAL) {
- if (likely(!pte_special(pte) || pte_numa(pte)))
+ if (likely(!pte_special(pte)))
goto check_pfn;
@@ -779,6 +785,9 @@ struct page *vm_normal_page(struct vm_ar
-
+ return NULL;
check_pfn:
if (unlikely(pfn > highest_memmap_pfn)) {
+#ifdef CONFIG_XEN
diff --git a/patches.xen/xen3-patch-2.6.37 b/patches.xen/xen3-patch-2.6.37
index 3215aa5b82..77be522161 100644
--- a/patches.xen/xen3-patch-2.6.37
+++ b/patches.xen/xen3-patch-2.6.37
@@ -3955,9 +3955,9 @@ Automatically created from "patches.kernel.org/patch-2.6.37" by xen-port-patches
+ if (!is_initial_xendomain())
+ return;
+#endif
- /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
- if (avail->flags & IORESOURCE_MEM) {
- if (avail->start < BIOS_END)
+ /*
+ * Trim out BIOS area (high 2MB) and E820 regions. We do not remove
+ * the low 1MB unconditionally, as this area is needed for some ISA
--- head.orig/arch/x86/kernel/setup-xen.c 2013-12-06 15:07:45.000000000 +0100
+++ head/arch/x86/kernel/setup-xen.c 2013-12-06 15:07:53.000000000 +0100
@@ -31,6 +31,7 @@
diff --git a/series.conf b/series.conf
index 9a0ca3c4e3..8b5204b407 100644
--- a/series.conf
+++ b/series.conf
@@ -28,6 +28,7 @@
# Send separate patches upstream if you find a problem...
########################################################
patches.kernel.org/patch-3.16.1
+ patches.kernel.org/patch-3.16.1-2
########################################################
# Build fixes that apply to the vanilla kernel too.
@@ -226,8 +227,6 @@
########################################################
# NFS
########################################################
- patches.fixes/nfs-nfs3_list_one_acl-check-get_acl-result-with-is_err_or_null
-
########################################################
# lockd + statd
@@ -251,17 +250,12 @@
########################################################
patches.suse/btrfs-use-correct-device-for-maps.patch
patches.suse/btrfs-check-index-in-extent_buffer_page
- patches.suse/btrfs-0003-read-lock-extent-buffer-while-walking-backrefs.patch
- patches.suse/btrfs-0006-fix-compressed-write-corruption-on-enospc.patch
- patches.fixes/Btrfs-Fix-memory-corruption-by-ulist_add_merge-on-32
- patches.suse/btrfs-0008-fix-csum-tree-corruption-duplicate-and-outdate.patch
patches.suse/btrfs-0011-fill_holes-Fix-slot-number-passed-to-hole_merg.patch
patches.suse/btrfs-0016-fix-wrong-write-range-for-filemap_fdatawrite_r.patch
patches.suse/btrfs-0017-fix-wrong-extent-mapping-for-DirectIO.patch
patches.suse/btrfs-0018-Return-right-extent-when-fiemap-gives-unaligne.patch
patches.suse/btrfs-0025-ensure-tmpfile-inode-is-always-persisted-with-.patch
patches.suse/btrfs-0027-don-t-monopolize-a-core-when-evicting-inode.patch
- patches.suse/btrfs-0033-fix-task-hang-under-heavy-compressed-write.patch
########################################################
# Reiserfs Patches
@@ -371,7 +365,6 @@
# DRM/Video
########################################################
patches.fixes/nouveau-fix-race-with-fence-signaling
- patches.fixes/drm-omapdrm-fix-compiler-errors
########################################################
# video4linux