Home > GIT Browse > vanilla
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorJiri Slaby <jslaby@suse.cz>2014-09-17 22:55:37 +0200
committerJiri Slaby <jslaby@suse.cz>2014-09-18 08:32:16 +0200
commitd2bbe7fb53704e1d54bc1e3939ba0c9e0fa6aa3b (patch)
treea77e3d0fd07aaf86d95aad9ee7c2f56afe73bb53
parent5fca623fef82770d0a6895d3fb6382ac069cb1a6 (diff)
- Delete patches.fixes/reiserfs-fix-corruption-introduced-by-balance_leaf-refactor.
- Delete patches.fixes/rtsx_usb-export-device-table.
- Update config files. Set CONFIG_SMS_SIANO_DEBUGFS=n as it is in master.
-rw-r--r--config/arm64/default2
-rw-r--r--config/armv6hl/default3
-rw-r--r--config/armv7hl/default3
-rw-r--r--config/armv7hl/lpae3
-rw-r--r--config/i386/debug3
-rw-r--r--config/i386/default3
-rw-r--r--config/i386/desktop3
-rw-r--r--config/i386/ec22
-rw-r--r--config/i386/pae3
-rw-r--r--config/i386/vanilla3
-rw-r--r--config/i386/xen3
-rw-r--r--config/ppc/default2
-rw-r--r--config/ppc/vanilla2
-rw-r--r--config/ppc64/debug2
-rw-r--r--config/ppc64/default2
-rw-r--r--config/ppc64/vanilla2
-rw-r--r--config/ppc64le/debug2
-rw-r--r--config/ppc64le/default2
-rw-r--r--config/ppc64le/vanilla2
-rw-r--r--config/s390x/default5
-rw-r--r--config/s390x/vanilla5
-rw-r--r--config/x86_64/debug3
-rw-r--r--config/x86_64/default3
-rw-r--r--config/x86_64/desktop3
-rw-r--r--config/x86_64/ec22
-rw-r--r--config/x86_64/vanilla3
-rw-r--r--config/x86_64/xen3
-rw-r--r--patches.fixes/reiserfs-fix-corruption-introduced-by-balance_leaf-refactor376
-rw-r--r--patches.fixes/rtsx_usb-export-device-table28
-rw-r--r--patches.kernel.org/patch-3.16.2-37306
-rw-r--r--series.conf3
31 files changed, 7354 insertions, 433 deletions
diff --git a/config/arm64/default b/config/arm64/default
index 0121bc135e..28e2609a65 100644
--- a/config/arm64/default
+++ b/config/arm64/default
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/arm64 3.16.1 Kernel Configuration
+# Linux/arm64 3.16.3 Kernel Configuration
#
CONFIG_ARM64=y
CONFIG_64BIT=y
diff --git a/config/armv6hl/default b/config/armv6hl/default
index d1c862eede..466aea427f 100644
--- a/config/armv6hl/default
+++ b/config/armv6hl/default
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/arm 3.16.1 Kernel Configuration
+# Linux/arm 3.16.3 Kernel Configuration
#
CONFIG_ARM=y
CONFIG_ARM_HAS_SG_CHAIN=y
@@ -3626,6 +3626,7 @@ CONFIG_CYPRESS_FIRMWARE=m
CONFIG_DVB_B2C2_FLEXCOP=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/armv7hl/default b/config/armv7hl/default
index 523fd23b1d..ca4bb54836 100644
--- a/config/armv7hl/default
+++ b/config/armv7hl/default
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/arm 3.16.1 Kernel Configuration
+# Linux/arm 3.16.3 Kernel Configuration
#
CONFIG_ARM=y
CONFIG_ARM_HAS_SG_CHAIN=y
@@ -4475,6 +4475,7 @@ CONFIG_CYPRESS_FIRMWARE=m
CONFIG_DVB_B2C2_FLEXCOP=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/armv7hl/lpae b/config/armv7hl/lpae
index f7072b1d83..a6eeacd206 100644
--- a/config/armv7hl/lpae
+++ b/config/armv7hl/lpae
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/arm 3.16.1 Kernel Configuration
+# Linux/arm 3.16.3 Kernel Configuration
#
CONFIG_ARM=y
CONFIG_ARM_HAS_SG_CHAIN=y
@@ -4221,6 +4221,7 @@ CONFIG_CYPRESS_FIRMWARE=m
CONFIG_DVB_B2C2_FLEXCOP=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/i386/debug b/config/i386/debug
index c56a1b7584..672ab5689a 100644
--- a/config/i386/debug
+++ b/config/i386/debug
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/i386 3.16.0 Kernel Configuration
+# Linux/i386 3.16.3 Kernel Configuration
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
@@ -4360,6 +4360,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/i386/default b/config/i386/default
index 7ff00f6c41..a6ac3dff3c 100644
--- a/config/i386/default
+++ b/config/i386/default
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/i386 3.16.0 Kernel Configuration
+# Linux/i386 3.16.3 Kernel Configuration
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
@@ -4397,6 +4397,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/i386/desktop b/config/i386/desktop
index 10f636ba8a..5d9f98c0ac 100644
--- a/config/i386/desktop
+++ b/config/i386/desktop
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/i386 3.16.0 Kernel Configuration
+# Linux/i386 3.16.3 Kernel Configuration
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
@@ -4291,6 +4291,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/i386/ec2 b/config/i386/ec2
index b3d21e375b..e336384dca 100644
--- a/config/i386/ec2
+++ b/config/i386/ec2
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/i386 3.16.0 Kernel Configuration
+# Linux/i386 3.16.3 Kernel Configuration
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
diff --git a/config/i386/pae b/config/i386/pae
index 3afa7689fb..412f938324 100644
--- a/config/i386/pae
+++ b/config/i386/pae
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/i386 3.16.0 Kernel Configuration
+# Linux/i386 3.16.3 Kernel Configuration
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
@@ -4292,6 +4292,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/i386/vanilla b/config/i386/vanilla
index b3370c24d4..7f20efed7d 100644
--- a/config/i386/vanilla
+++ b/config/i386/vanilla
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/i386 3.16.0 Kernel Configuration
+# Linux/i386 3.16.3 Kernel Configuration
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
@@ -4391,6 +4391,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/i386/xen b/config/i386/xen
index c9de00f154..043a6f8662 100644
--- a/config/i386/xen
+++ b/config/i386/xen
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/i386 3.16.2 Kernel Configuration
+# Linux/i386 3.16.3 Kernel Configuration
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
@@ -4066,6 +4066,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/ppc/default b/config/ppc/default
index c493766a94..e958121866 100644
--- a/config/ppc/default
+++ b/config/ppc/default
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/powerpc 3.16.0 Kernel Configuration
+# Linux/powerpc 3.16.3 Kernel Configuration
#
# CONFIG_PPC64 is not set
diff --git a/config/ppc/vanilla b/config/ppc/vanilla
index 83f7fd15e9..1ffb35deb2 100644
--- a/config/ppc/vanilla
+++ b/config/ppc/vanilla
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/powerpc 3.16.0 Kernel Configuration
+# Linux/powerpc 3.16.3 Kernel Configuration
#
# CONFIG_PPC64 is not set
diff --git a/config/ppc64/debug b/config/ppc64/debug
index 9c29c5521f..5e9a212e0e 100644
--- a/config/ppc64/debug
+++ b/config/ppc64/debug
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/powerpc 3.16.0 Kernel Configuration
+# Linux/powerpc 3.16.3 Kernel Configuration
#
CONFIG_PPC64=y
diff --git a/config/ppc64/default b/config/ppc64/default
index 8adae51f87..ba6e0eb2ca 100644
--- a/config/ppc64/default
+++ b/config/ppc64/default
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/powerpc 3.16.0 Kernel Configuration
+# Linux/powerpc 3.16.3 Kernel Configuration
#
CONFIG_PPC64=y
diff --git a/config/ppc64/vanilla b/config/ppc64/vanilla
index 9fd3a84579..ad07269704 100644
--- a/config/ppc64/vanilla
+++ b/config/ppc64/vanilla
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/powerpc 3.16.0 Kernel Configuration
+# Linux/powerpc 3.16.3 Kernel Configuration
#
CONFIG_PPC64=y
diff --git a/config/ppc64le/debug b/config/ppc64le/debug
index 7e8244178c..d004fcc182 100644
--- a/config/ppc64le/debug
+++ b/config/ppc64le/debug
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/powerpc 3.16.0 Kernel Configuration
+# Linux/powerpc 3.16.3 Kernel Configuration
#
CONFIG_PPC64=y
diff --git a/config/ppc64le/default b/config/ppc64le/default
index 780b81c425..ce1efbcf29 100644
--- a/config/ppc64le/default
+++ b/config/ppc64le/default
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/powerpc 3.16.0 Kernel Configuration
+# Linux/powerpc 3.16.3 Kernel Configuration
#
CONFIG_PPC64=y
diff --git a/config/ppc64le/vanilla b/config/ppc64le/vanilla
index 6f8bed19ba..da127d8382 100644
--- a/config/ppc64le/vanilla
+++ b/config/ppc64le/vanilla
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/powerpc 3.16.0 Kernel Configuration
+# Linux/powerpc 3.16.3 Kernel Configuration
#
CONFIG_PPC64=y
diff --git a/config/s390x/default b/config/s390x/default
index fe8660f4ef..433ccc802f 100644
--- a/config/s390x/default
+++ b/config/s390x/default
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/s390 3.16.0 Kernel Configuration
+# Linux/s390 3.16.3 Kernel Configuration
#
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
@@ -341,6 +341,9 @@ CONFIG_INLINE_WRITE_UNLOCK=y
CONFIG_INLINE_WRITE_UNLOCK_BH=y
CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
CONFIG_FREEZER=y
#
diff --git a/config/s390x/vanilla b/config/s390x/vanilla
index ddcbad1789..d1550504f2 100644
--- a/config/s390x/vanilla
+++ b/config/s390x/vanilla
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/s390 3.16.0 Kernel Configuration
+# Linux/s390 3.16.3 Kernel Configuration
#
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
@@ -339,6 +339,9 @@ CONFIG_INLINE_WRITE_UNLOCK=y
CONFIG_INLINE_WRITE_UNLOCK_BH=y
CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
CONFIG_FREEZER=y
#
diff --git a/config/x86_64/debug b/config/x86_64/debug
index 60df5dd84f..a4d6b39ced 100644
--- a/config/x86_64/debug
+++ b/config/x86_64/debug
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.16.0 Kernel Configuration
+# Linux/x86_64 3.16.3 Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -4244,6 +4244,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/x86_64/default b/config/x86_64/default
index 79f9c5b57f..feeb86e719 100644
--- a/config/x86_64/default
+++ b/config/x86_64/default
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.16.0 Kernel Configuration
+# Linux/x86_64 3.16.3 Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -4234,6 +4234,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/x86_64/desktop b/config/x86_64/desktop
index 4d54319894..cd25759e34 100644
--- a/config/x86_64/desktop
+++ b/config/x86_64/desktop
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.16.0 Kernel Configuration
+# Linux/x86_64 3.16.3 Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -4233,6 +4233,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/x86_64/ec2 b/config/x86_64/ec2
index 8def1a60e7..6cb386efb7 100644
--- a/config/x86_64/ec2
+++ b/config/x86_64/ec2
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.16.0 Kernel Configuration
+# Linux/x86_64 3.16.3 Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
diff --git a/config/x86_64/vanilla b/config/x86_64/vanilla
index 31e4b776a1..6b2b451d59 100644
--- a/config/x86_64/vanilla
+++ b/config/x86_64/vanilla
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.16.0 Kernel Configuration
+# Linux/x86_64 3.16.3 Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -4230,6 +4230,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/config/x86_64/xen b/config/x86_64/xen
index 185ad29094..382266429f 100644
--- a/config/x86_64/xen
+++ b/config/x86_64/xen
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 3.16.2 Kernel Configuration
+# Linux/x86_64 3.16.3 Kernel Configuration
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -4013,6 +4013,7 @@ CONFIG_VIDEO_SAA7146=m
CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
#
# Media ancillary drivers (tuners, sensors, i2c, frontends)
diff --git a/patches.fixes/reiserfs-fix-corruption-introduced-by-balance_leaf-refactor b/patches.fixes/reiserfs-fix-corruption-introduced-by-balance_leaf-refactor
deleted file mode 100644
index 9aeffcd3ac..0000000000
--- a/patches.fixes/reiserfs-fix-corruption-introduced-by-balance_leaf-refactor
+++ /dev/null
@@ -1,376 +0,0 @@
-From: Jeff Mahoney <jeffm@suse.com>
-Subject: reiserfs: fix corruption introduced by balance_leaf refactor
-Patch-mainline: submitted to reiserfs-devel, 4 Aug 2014
-
-Commits f1f007c308e (reiserfs: balance_leaf refactor, pull out
-balance_leaf_insert_left) and cf22df182bf (reiserfs: balance_leaf
-refactor, pull out balance_leaf_paste_left) missed that the `body'
-pointer was getting repositioned. Subsequent users of the pointer
-would expect it to be repositioned, and as a result, parts of the
-tree would get overwritten. The most common observed corruption
-is indirect block pointers being overwritten.
-
-Since the body value isn't actually used anymore in the called routines,
-we can pass back the offset it should be shifted. We constify the body
-and ih pointers in the balance_leaf as a mostly-free preventative measure.
-
-Cc: <stable@vger.kernel.org> # 3.16
-Reported-by: Jeff Chua <jeff.chua.linux@gmail.com>
-Signed-off-by: Jeff Mahoney <jeffm@suse.com>
----
- fs/reiserfs/do_balan.c | 111 +++++++++++++++++++++++++++----------------------
- fs/reiserfs/lbalance.c | 5 +-
- fs/reiserfs/reiserfs.h | 9 ++-
- 3 files changed, 71 insertions(+), 54 deletions(-)
-
---- a/fs/reiserfs/do_balan.c
-+++ b/fs/reiserfs/do_balan.c
-@@ -286,12 +286,14 @@ static int balance_leaf_when_delete(stru
- return 0;
- }
-
--static void balance_leaf_insert_left(struct tree_balance *tb,
-- struct item_head *ih, const char *body)
-+static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
-+ struct item_head *const ih,
-+ const char * const body)
- {
- int ret;
- struct buffer_info bi;
- int n = B_NR_ITEMS(tb->L[0]);
-+ unsigned body_shift_bytes = 0;
-
- if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
- /* part of new item falls into L[0] */
-@@ -329,7 +331,7 @@ static void balance_leaf_insert_left(str
-
- put_ih_item_len(ih, new_item_len);
- if (tb->lbytes > tb->zeroes_num) {
-- body += (tb->lbytes - tb->zeroes_num);
-+ body_shift_bytes = tb->lbytes - tb->zeroes_num;
- tb->zeroes_num = 0;
- } else
- tb->zeroes_num -= tb->lbytes;
-@@ -349,11 +351,12 @@ static void balance_leaf_insert_left(str
- tb->insert_size[0] = 0;
- tb->zeroes_num = 0;
- }
-+ return body_shift_bytes;
- }
-
- static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body)
-+ struct item_head * const ih,
-+ const char * const body)
- {
- int n = B_NR_ITEMS(tb->L[0]);
- struct buffer_info bi;
-@@ -413,17 +416,18 @@ static void balance_leaf_paste_left_shif
- tb->pos_in_item -= tb->lbytes;
- }
-
--static void balance_leaf_paste_left_shift(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body)
-+static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb,
-+ struct item_head * const ih,
-+ const char * const body)
- {
- struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- int n = B_NR_ITEMS(tb->L[0]);
- struct buffer_info bi;
-+ int body_shift_bytes = 0;
-
- if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
- balance_leaf_paste_left_shift_dirent(tb, ih, body);
-- return;
-+ return 0;
- }
-
- RFALSE(tb->lbytes <= 0,
-@@ -497,7 +501,7 @@ static void balance_leaf_paste_left_shif
- * insert_size[0]
- */
- if (l_n > tb->zeroes_num) {
-- body += (l_n - tb->zeroes_num);
-+ body_shift_bytes = l_n - tb->zeroes_num;
- tb->zeroes_num = 0;
- } else
- tb->zeroes_num -= l_n;
-@@ -526,13 +530,14 @@ static void balance_leaf_paste_left_shif
- */
- leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
- }
-+ return body_shift_bytes;
- }
-
-
- /* appended item will be in L[0] in whole */
- static void balance_leaf_paste_left_whole(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body)
-+ struct item_head * const ih,
-+ const char * const body)
- {
- struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- int n = B_NR_ITEMS(tb->L[0]);
-@@ -584,39 +589,44 @@ static void balance_leaf_paste_left_whol
- tb->zeroes_num = 0;
- }
-
--static void balance_leaf_paste_left(struct tree_balance *tb,
-- struct item_head *ih, const char *body)
-+static unsigned int balance_leaf_paste_left(struct tree_balance *tb,
-+ struct item_head * const ih,
-+ const char * const body)
- {
- /* we must shift the part of the appended item */
- if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
-- balance_leaf_paste_left_shift(tb, ih, body);
-+ return balance_leaf_paste_left_shift(tb, ih, body);
- else
- balance_leaf_paste_left_whole(tb, ih, body);
-+ return 0;
- }
-
- /* Shift lnum[0] items from S[0] to the left neighbor L[0] */
--static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih,
-- const char *body, int flag)
-+static unsigned int balance_leaf_left(struct tree_balance *tb,
-+ struct item_head * const ih,
-+ const char * const body, int flag)
- {
- if (tb->lnum[0] <= 0)
-- return;
-+ return 0;
-
- /* new item or it part falls to L[0], shift it too */
- if (tb->item_pos < tb->lnum[0]) {
- BUG_ON(flag != M_INSERT && flag != M_PASTE);
-
- if (flag == M_INSERT)
-- balance_leaf_insert_left(tb, ih, body);
-+ return balance_leaf_insert_left(tb, ih, body);
- else /* M_PASTE */
-- balance_leaf_paste_left(tb, ih, body);
-+ return balance_leaf_paste_left(tb, ih, body);
- } else
- /* new item doesn't fall into L[0] */
- leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
-+ return 0;
- }
-
-
- static void balance_leaf_insert_right(struct tree_balance *tb,
-- struct item_head *ih, const char *body)
-+ struct item_head * const ih,
-+ const char * const body)
- {
-
- struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
-@@ -704,7 +714,8 @@ static void balance_leaf_insert_right(st
-
-
- static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
-- struct item_head *ih, const char *body)
-+ struct item_head * const ih,
-+ const char * const body)
- {
- struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- struct buffer_info bi;
-@@ -754,7 +765,8 @@ static void balance_leaf_paste_right_shi
- }
-
- static void balance_leaf_paste_right_shift(struct tree_balance *tb,
-- struct item_head *ih, const char *body)
-+ struct item_head * const ih,
-+ const char * const body)
- {
- struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- int n_shift, n_rem, r_zeroes_number, version;
-@@ -831,7 +843,8 @@ static void balance_leaf_paste_right_shi
- }
-
- static void balance_leaf_paste_right_whole(struct tree_balance *tb,
-- struct item_head *ih, const char *body)
-+ struct item_head * const ih,
-+ const char * const body)
- {
- struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- int n = B_NR_ITEMS(tbS0);
-@@ -874,7 +887,8 @@ static void balance_leaf_paste_right_who
- }
-
- static void balance_leaf_paste_right(struct tree_balance *tb,
-- struct item_head *ih, const char *body)
-+ struct item_head * const ih,
-+ const char * const body)
- {
- struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- int n = B_NR_ITEMS(tbS0);
-@@ -896,8 +910,9 @@ static void balance_leaf_paste_right(str
- }
-
- /* shift rnum[0] items from S[0] to the right neighbor R[0] */
--static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
-- const char *body, int flag)
-+static void balance_leaf_right(struct tree_balance *tb,
-+ struct item_head * const ih,
-+ const char * const body, int flag)
- {
- if (tb->rnum[0] <= 0)
- return;
-@@ -911,8 +926,8 @@ static void balance_leaf_right(struct tr
- }
-
- static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body,
-+ struct item_head * const ih,
-+ const char * const body,
- struct item_head *insert_key,
- struct buffer_head **insert_ptr,
- int i)
-@@ -1003,8 +1018,8 @@ static void balance_leaf_new_nodes_inser
-
- /* we append to directory item */
- static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body,
-+ struct item_head * const ih,
-+ const char * const body,
- struct item_head *insert_key,
- struct buffer_head **insert_ptr,
- int i)
-@@ -1058,8 +1073,8 @@ static void balance_leaf_new_nodes_paste
- }
-
- static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body,
-+ struct item_head * const ih,
-+ const char * const body,
- struct item_head *insert_key,
- struct buffer_head **insert_ptr,
- int i)
-@@ -1131,8 +1146,8 @@ static void balance_leaf_new_nodes_paste
- }
-
- static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body,
-+ struct item_head * const ih,
-+ const char * const body,
- struct item_head *insert_key,
- struct buffer_head **insert_ptr,
- int i)
-@@ -1184,8 +1199,8 @@ static void balance_leaf_new_nodes_paste
-
- }
- static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body,
-+ struct item_head * const ih,
-+ const char * const body,
- struct item_head *insert_key,
- struct buffer_head **insert_ptr,
- int i)
-@@ -1214,8 +1229,8 @@ static void balance_leaf_new_nodes_paste
-
- /* Fill new nodes that appear in place of S[0] */
- static void balance_leaf_new_nodes(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body,
-+ struct item_head * const ih,
-+ const char * const body,
- struct item_head *insert_key,
- struct buffer_head **insert_ptr,
- int flag)
-@@ -1254,8 +1269,8 @@ static void balance_leaf_new_nodes(struc
- }
-
- static void balance_leaf_finish_node_insert(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body)
-+ struct item_head * const ih,
-+ const char * const body)
- {
- struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- struct buffer_info bi;
-@@ -1271,8 +1286,8 @@ static void balance_leaf_finish_node_ins
- }
-
- static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body)
-+ struct item_head * const ih,
-+ const char * const body)
- {
- struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- struct item_head *pasted = item_head(tbS0, tb->item_pos);
-@@ -1305,8 +1320,8 @@ static void balance_leaf_finish_node_pas
- }
-
- static void balance_leaf_finish_node_paste(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body)
-+ struct item_head * const ih,
-+ const char * const body)
- {
- struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- struct buffer_info bi;
-@@ -1349,8 +1364,8 @@ static void balance_leaf_finish_node_pas
- * of the affected item which remains in S
- */
- static void balance_leaf_finish_node(struct tree_balance *tb,
-- struct item_head *ih,
-- const char *body, int flag)
-+ struct item_head * const ih,
-+ const char * const body, int flag)
- {
- /* if we must insert or append into buffer S[0] */
- if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
-@@ -1402,7 +1417,7 @@ static int balance_leaf(struct tree_bala
- && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
- tb->pos_in_item *= UNFM_P_SIZE;
-
-- balance_leaf_left(tb, ih, body, flag);
-+ body += balance_leaf_left(tb, ih, body, flag);
-
- /* tb->lnum[0] > 0 */
- /* Calculate new item position */
---- a/fs/reiserfs/lbalance.c
-+++ b/fs/reiserfs/lbalance.c
-@@ -899,8 +899,9 @@ void leaf_delete_items(struct buffer_inf
-
- /* insert item into the leaf node in position before */
- void leaf_insert_into_buf(struct buffer_info *bi, int before,
-- struct item_head *inserted_item_ih,
-- const char *inserted_item_body, int zeros_number)
-+ struct item_head * const inserted_item_ih,
-+ const char * const inserted_item_body,
-+ int zeros_number)
- {
- struct buffer_head *bh = bi->bi_bh;
- int nr, free_space;
---- a/fs/reiserfs/reiserfs.h
-+++ b/fs/reiserfs/reiserfs.h
-@@ -3216,11 +3216,12 @@ int leaf_shift_right(struct tree_balance
- void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first,
- int del_num, int del_bytes);
- void leaf_insert_into_buf(struct buffer_info *bi, int before,
-- struct item_head *inserted_item_ih,
-- const char *inserted_item_body, int zeros_number);
--void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
-- int pos_in_item, int paste_size, const char *body,
-+ struct item_head * const inserted_item_ih,
-+ const char * const inserted_item_body,
- int zeros_number);
-+void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
-+ int pos_in_item, int paste_size,
-+ const char * const body, int zeros_number);
- void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
- int pos_in_item, int cut_size);
- void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
-
diff --git a/patches.fixes/rtsx_usb-export-device-table b/patches.fixes/rtsx_usb-export-device-table
deleted file mode 100644
index 9883682b96..0000000000
--- a/patches.fixes/rtsx_usb-export-device-table
+++ /dev/null
@@ -1,28 +0,0 @@
-From: Jeff Mahoney <jeffm@suse.com>
-Subject: rtsx_usb: export device table
-References: bnc#890096
-Patch-mainline: Submitted to LKML 8 Aug 2014
-
-The rtsx_usb driver contains the table for the devices it supports but
-doesn't export it. As a result, no alias is generated and it doesn't
-get loaded automatically.
-
-Via https://bugzilla.novell.com/show_bug.cgi?id=890096
-
-Reported-by: Marcel Witte <wittemar@googlemail.com>
-Signed-off-by: Jeff Mahoney <jeffm@suse.com>
----
- drivers/mfd/rtsx_usb.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/drivers/mfd/rtsx_usb.c
-+++ b/drivers/mfd/rtsx_usb.c
-@@ -744,6 +744,7 @@ static struct usb_device_id rtsx_usb_usb
- { USB_DEVICE(0x0BDA, 0x0140) },
- { }
- };
-+MODULE_DEVICE_TABLE(usb, rtsx_usb_usb_ids);
-
- static struct usb_driver rtsx_usb_driver = {
- .name = "rtsx_usb",
-
diff --git a/patches.kernel.org/patch-3.16.2-3 b/patches.kernel.org/patch-3.16.2-3
new file mode 100644
index 0000000000..1cfc08c66b
--- /dev/null
+++ b/patches.kernel.org/patch-3.16.2-3
@@ -0,0 +1,7306 @@
+From: Jiri Slaby <jslaby@suse.cz>
+Subject: Linux 3.16.3
+Patch-mainline: 3.16.3
+References: bnc#890096
+Git-commit: 95389b08d93d5c06ec63ab49bd732b0069b7c35e
+Git-commit: 27419604f51a97d497853f14142c1059d46eb597
+Git-commit: 52755808d4525f4d5b86d112d36ffc7a46f3fb48
+Git-commit: 99d263d4c5b2f541dfacb5391e22e8c91ea982a6
+Git-commit: 7820e5eef0faa4a5e10834296680827f7ce78a89
+Git-commit: 9cba5efab5a8145ae6c52ea273553f069c294482
+Git-commit: bcc05910359183b431da92713e98eed478edf83a
+Git-commit: 200612ec33e555a356eebc717630b866ae2b694f
+Git-commit: 40ddbf5069bd4e11447c0088fc75318e0aac53f0
+Git-commit: a152056c912db82860a8b4c23d0bd3a5aa89e363
+Git-commit: f736906a7669a77cf8cabdcbcf1dc8cb694e12ef
+Git-commit: 1bbe4997b13de903c421c1cc78440e544b5f9064
+Git-commit: a07d322059db66b84c9eb4f98959df468e88b34b
+Git-commit: b46799a8f28c43c5264ac8d8ffa28b311b557e03
+Git-commit: 18f39e7be0121317550d03e267e3ebd4dbfbb3ce
+Git-commit: 038bc961c31b070269ecd07349a7ee2e839d4fec
+Git-commit: 21496687a79424572f46a84c690d331055f4866f
+Git-commit: c27a3e4d667fdcad3db7b104f75659478e0c68d8
+Git-commit: 597cda357716a3cf8d994cb11927af917c8d71fa
+Git-commit: 73c3d4812b4c755efeca0140f606f83772a39ce4
+Git-commit: 5f740d7e1531099b888410e6bab13f68da9b1a4d
+Git-commit: 85e584da3212140ee80fd047f9058bbee0bc00d5
+Git-commit: 834ffca6f7e345a79f6f2e2d131b0dfba8a4b67a
+Git-commit: 22e757a49cf010703fcb9c9b4ef793248c39b0c2
+Git-commit: 5fd364fee81a7888af806e42ed8a91c845894f2d
+Git-commit: 67dc288c21064b31a98a53dc64f6b9714b819fd6
+Git-commit: db1044d458a287c18c4d413adc4ad12e92e253b5
+Git-commit: 2f0304d21867476394cd51a54e97f7273d112261
+Git-commit: b39685526f46976bcd13aa08c82480092befa46c
+Git-commit: ce0b0a46955d1bb389684a2605dbcaa990ba0154
+Git-commit: 9c4bdf697c39805078392d5ddbbba5ae5680e0dd
+Git-commit: a40687ff73a5b14909d6aa522f7d778b158911c5
+Git-commit: 2446dba03f9dabe0b477a126cbeb377854785b47
+Git-commit: 12a5b5294cb1896e9a3c9fca8ff5a7e3def4e8c6
+Git-commit: bc1ecc65a259fa9333dc8bd6a4ba0cf03b7d4bf8
+Git-commit: 60bb45297f7551833346c5cebc6d483ea17ea5f2
+Git-commit: 32333edb82fb2009980eefc5518100068147ab82
+Git-commit: 9f743d7499bc2c4dc8c35af33bdb2a29bea663b9
+Git-commit: 093facf3634da1b0c2cc7ed106f1983da901bbab
+Git-commit: 6c53823ae0e10e723131055e1e65dd6a328a228e
+Git-commit: 42bd6a56ed1ab4b2cb50f4d4e674874da9b47f46
+Git-commit: 396e04f4bb9afefb0744715dc76d9abe18ee5fb0
+Git-commit: 81b6b06197606b4bef4e427a197aeb808e8d89e1
+Git-commit: 88b368f27a094277143d8ecd5a056116f6a41520
+Git-commit: db181ce011e3c033328608299cd6fac06ea50130
+Git-commit: ffbc6f0ead47fa5a1dc9642b0331cb75c20a640e
+Git-commit: 9566d6742852c527bf5af38af5cbb878dad75705
+Git-commit: 07b645589dcda8b7a5249e096fece2a67556f0f4
+Git-commit: a6138db815df5ee542d848318e5dae681590fccd
+Git-commit: 021de3d904b88b1771a3a2cfc5b75023c391e646
+Git-commit: 651e22f2701b4113989237c3048d17337dd2185c
+Git-commit: c12784c3d14a2110468ec4d1383f60cfd2665576
+Git-commit: 84c34858a85ecf9dabd72847d860c7d3fb7536e7
+Git-commit: 5f24079b021cd3147c8d24ba65833f7a0df7e80d
+Git-commit: 25294e9f00f03b2b4f4c56e913bc8c573972f33b
+Git-commit: 6726655dfdd2dc60c035c690d9f10cb69d7ea075
+Git-commit: a383b68d9fe9864c4d3b86f67ad6488f58136435
+Git-commit: 558e4736f2e1b0e6323adf7a5e4df77ed6cfc1a4
+Git-commit: 3afcf2ece453e1a8c2c6de19cdf06da3772a1b08
+Git-commit: fc2e0a8326d1b21d11ef8213298e5302867fed2c
+Git-commit: 236105db632c6279a020f78c83e22eaef746006b
+Git-commit: aca26364689e00e3b2052072424682231bdae6ae
+Git-commit: dee1592638ab7ea35a32179b73f9284dead49c03
+Git-commit: e23d9b8297546c6ceb7e70771e4915f2a41733cd
+Git-commit: 8aa5e56eeb61a099ea6519eb30ee399e1bc043ce
+Git-commit: 3c0185046c0ee49a6e55c714612ef3bcd5385df3
+Git-commit: 03a6c3ff3282ee9fa893089304d951e0be93a144
+Git-commit: fdaf42c0105a24de8aefa60f6f7360842c4e673e
+Git-commit: f4821e8e8e957fe4c601a49b9a97b7399d5f7ab1
+Git-commit: d1555c407a65db42126b295425379acb393ba83a
+Git-commit: 9301503af016eb537ccce76adec0c1bb5c84871e
+Git-commit: 4548728981de259d7d37d0ae968a777b09794168
+Git-commit: 8e89761876611f06ef4be865b4780b4361caf4af
+Git-commit: a72d2abbe5752f3a773c4d8b7b41ae41f617f772
+Git-commit: 4adeb0ccf86a5af1825bbfe290dee9e60a5ab870
+Git-commit: 3ad80b828b2533f37c221e2df155774efd6ed814
+Git-commit: d3d4e5247b013008a39e4d5f69ce4c60ed57f997
+Git-commit: 30443408fd7201fd1911b09daccf92fae3cc700d
+Git-commit: 0a37c6efec4a2fdc2563c5a8faa472b814deee80
+Git-commit: 7ed9de76ff342cbd717a9cf897044b99272cb8f8
+Git-commit: b38314179c9ccb789e6fe967cff171fa817e8978
+Git-commit: 6521d9a436a62e83ce57d6be6e5484e1098c1380
+Git-commit: ca4d24f7954f3746742ba350c2276ff777f21173
+Git-commit: f85b71ceabb9d8d8a9e34b045b5c43ffde3623b3
+Git-commit: 608308682addfdc7b8e2aee88f0e028331d88e4d
+Git-commit: 64615682658373516863b5b5971ff1d922d0ae7b
+Git-commit: 5245689900804604fdc349c8d9b8985b0e401ae2
+Git-commit: 40381529f84c4cda3bd2d20cab6a707508856b21
+Git-commit: bcec7c8da6b092b1ff3327fd83c2193adb12f684
+Git-commit: 2e5767a27337812f6850b3fa362419e2f085e5c3
+Git-commit: 8393c524a25609a30129e4a8975cf3b91f6c16a5
+Git-commit: b1442d39fac2fcfbe6a4814979020e993ca59c9e
+Git-commit: 7d907fa1c6ccb64c7f64cc7d3dcc7f6fe30a67b4
+Git-commit: 656ff9bef08c19a6471b49528dacb4cbbeb1e537
+Git-commit: c23b3d1a53119849dc3c23c417124deb067aa33d
+Git-commit: 65768a1a92cb12cbba87588927cf597a65d560aa
+Git-commit: e90e6fddc57055c4c6b57f92787fea1c065d440b
+Git-commit: ffc8415afab20bd97754efae6aad1f67b531132b
+Git-commit: 2ba136daa3ae1e881c9f586f283fcaa164767dce
+Git-commit: f6105c0808880c2c432b79bc81b37cc244c300c8
+Git-commit: cd53eb686d2418eda938aad3c9da42b7dfa9351f
+Git-commit: 0213436a2cc5e4a5ca2fabfaa4d3877097f3b13f
+Git-commit: c1d40a527e885a40bb9ea6c46a1b1145d42b66a0
+Git-commit: 22ffeb48b7584d6cd50f2a595ed6065d86a87459
+Git-commit: 3533f8603d28b77c62d75ec899449a99bc6b77a1
+Git-commit: f885fb73f64154690c2158e813de56363389ffec
+Git-commit: adb6f9e1a8c6af1037232b59edb11277471537ea
+Git-commit: 52f9614dd8294e95d2c0929c2d4f64b077ae486f
+Git-commit: 4cd83ecdac20d30725b4f96e5d7814a1e290bc7e
+Git-commit: 8caf92d80526f3d7cc96831ec18b384ebcaccdf0
+Git-commit: 56b26e69c8283121febedd12b3cc193384af46b9
+Git-commit: 884ffee01ddde5af260c7a5d1359c658aa1f0a11
+Git-commit: 7e467245bf5226db34c4b12d3cbacfa2f7a15a8b
+Git-commit: 969b7b208f7408712a3526856e4ae60ad13f6928
+Git-commit: fc0479557572375100ef16c71170b29a98e0d69a
+Git-commit: 629149fae478f0ac6bf705a535708b192e9c6b59
+Git-commit: fa1f8ae80f8bb996594167ff4750a0b0a5a5bb5d
+Git-commit: b0aa44a3dfae3d8f45bd1264349aa87f87b7774f
+Git-commit: 5efbabe09d986f25c02d19954660238fcd7f008a
+Git-commit: f1b3929c232784580e5d8ee324b6bc634e709575
+Git-commit: 85c1fafd7262e68ad821ee1808686b1392b1167d
+Git-commit: b00fc6ec1f24f9d7af9b8988b6a198186eb3408c
+Git-commit: 95707d852856aec1cbdad1873ff2dc5161a5cb91
+Git-commit: 42ab0f3915f22728f54bb1f3c0dcf38ab2335b5b
+Git-commit: 5b919f3ebb533cbe400664837e24f66a0836b907
+Git-commit: daebabd578647440d41fc9b48d8c7a88dc2f7ab5
+Git-commit: 1813908986e36119228c158aae1c6a0267c99e77
+Git-commit: 46de8ff8e80a6546aa3d2fdf58c6776666301a0c
+Git-commit: 618fde872163e782183ce574c77f1123e2be8887
+Git-commit: d0177639310d23c7739500df3c6ce6fdfe34acec
+Git-commit: e04aca4a769e16cf4f9b8a4bd3e761711640dc46
+Git-commit: 2b462638e41ea62230297c21c4da9955937b7a3c
+Git-commit: 5838d4442bd5971687b72221736222637e03140d
+Git-commit: 7d8b6c63751cfbbe5eef81a48c22978b3407a3ad
+Git-commit: b49e1043c48dac23f64fba684d31c4a96c1ffaa0
+Git-commit: 8e54caf407b98efa05409e1fee0e5381abd2b088
+Git-commit: 3e14d83ef94a5806a865b85b513b4e891923c19b
+Git-commit: aee530cfecf4f3ec83b78406bac618cec35853f8
+Git-commit: 0e16e4cfde70e1cf00f9fe3a8f601d10e73e0ec6
+Git-commit: 6174bac8c7ff73a86ae9a967d1c9cadc478023ae
+Git-commit: 36e7fdaa1a04fcf65b864232e1af56a51c7814d6
+Git-commit: 97ca0d6cc118716840ea443e010cb3d5f2d25eaf
+Git-commit: e06871cd2c92e5c65d7ca1d32866b4ca5dd4ac30
+Git-commit: 9b29d3c6510407d91786c1cf9183ff4debb3473a
+Git-commit: e7f9fa5498d91fcdc63d93007ba43f36b1a30538
+Git-commit: 763fe0addb8fe15ccea67c0aebddc06f4bb25439
+Git-commit: c875d2c1b8083cd627ea0463e20bf22c2d7421ee
+Git-commit: 3c4b422adb7694418848cefc2a4669d63192c649
+Git-commit: bd994ddb2a12a3ff48cd549ec82cdceaea9614df
+Git-commit: e51daefc228aa164adcc17fe8fce0f856ad0a1cc
+Git-commit: f17bc3f4707eb87bdb80b895911c551cdd606fbd
+Git-commit: f8ca6ac00d2ba24c5557f08f81439cd3432f0802
+Git-commit: 64ea37bbd8a5815522706f0099ad3f11c7537e15
+Git-commit: 4c07e32884ab69574cfd9eb4de3334233c938071
+Git-commit: a3eec916cbc17dc1aaa3ddf120836cd5200eb4ef
+Git-commit: 01777836c87081e4f68c4a43c9abe6114805f91e
+Git-commit: 27d0e5bc85f3341b9ba66f0c23627cf9d7538c9d
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+diff --git a/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt b/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
+index 46f344965313..4eb7997674a0 100644
+--- a/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
++++ b/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
+@@ -1,7 +1,7 @@
+ ADI AXI-SPDIF controller
+
+ Required properties:
+- - compatible : Must be "adi,axi-spdif-1.00.a"
++ - compatible : Must be "adi,axi-spdif-tx-1.00.a"
+ - reg : Must contain SPDIF core's registers location and length
+ - clocks : Pairs of phandle and specifier referencing the controller's clocks.
+ The controller expects two clocks, the clock used for the AXI interface and
+diff --git a/Makefile b/Makefile
+index c2617526e605..9b25a830a9d7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 16
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Museum of Fishiegoodies
+
+diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
+index b15f1a77d684..1fe45d1f75ec 100644
+--- a/arch/arm/boot/dts/omap3-n900.dts
++++ b/arch/arm/boot/dts/omap3-n900.dts
+@@ -353,7 +353,7 @@
+ };
+
+ twl_power: power {
+- compatible = "ti,twl4030-power-n900";
++ compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
+ ti,use_poweroff;
+ };
+ };
+diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
+index 008e9c8b8eac..c9d9c627e244 100644
+--- a/arch/mips/cavium-octeon/setup.c
++++ b/arch/mips/cavium-octeon/setup.c
+@@ -458,6 +458,18 @@ static void octeon_halt(void)
+ octeon_kill_core(NULL);
+ }
+
++static char __read_mostly octeon_system_type[80];
++
++static int __init init_octeon_system_type(void)
++{
++ snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
++ cvmx_board_type_to_string(octeon_bootinfo->board_type),
++ octeon_model_get_string(read_c0_prid()));
++
++ return 0;
++}
++early_initcall(init_octeon_system_type);
++
+ /**
+ * Return a string representing the system type
+ *
+@@ -465,11 +477,7 @@ static void octeon_halt(void)
+ */
+ const char *octeon_board_type_string(void)
+ {
+- static char name[80];
+- sprintf(name, "%s (%s)",
+- cvmx_board_type_to_string(octeon_bootinfo->board_type),
+- octeon_model_get_string(read_c0_prid()));
+- return name;
++ return octeon_system_type;
+ }
+
+ const char *get_system_type(void)
+diff --git a/arch/mips/include/asm/eva.h b/arch/mips/include/asm/eva.h
+new file mode 100644
+index 000000000000..a3d1807f227c
+--- /dev/null
++++ b/arch/mips/include/asm/eva.h
+@@ -0,0 +1,43 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 2014, Imagination Technologies Ltd.
++ *
++ * EVA functions for generic code
++ */
++
++#ifndef _ASM_EVA_H
++#define _ASM_EVA_H
++
++#include <kernel-entry-init.h>
++
++#ifdef __ASSEMBLY__
++
++#ifdef CONFIG_EVA
++
++/*
++ * EVA early init code
++ *
++ * Platforms must define their own 'platform_eva_init' macro in
++ * their kernel-entry-init.h header. This macro usually does the
++ * platform specific configuration of the segmentation registers,
++ * and it is normally called from assembly code.
++ *
++ */
++
++.macro eva_init
++platform_eva_init
++.endm
++
++#else
++
++.macro eva_init
++.endm
++
++#endif /* CONFIG_EVA */
++
++#endif /* __ASSEMBLY__ */
++
++#endif
+diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+index 77eeda77e73c..0cf8622db27f 100644
+--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
++++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+@@ -10,14 +10,15 @@
+ #ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
+ #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
+
++#include <asm/regdef.h>
++#include <asm/mipsregs.h>
++
+ /*
+ * Prepare segments for EVA boot:
+ *
+ * This is in case the processor boots in legacy configuration
+ * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
+ *
+- * On entry, t1 is loaded with CP0_CONFIG
+- *
+ * ========================= Mappings =============================
+ * Virtual memory Physical memory Mapping
+ * 0x00000000 - 0x7fffffff 0x80000000 - 0xfffffffff MUSUK (kuseg)
+@@ -30,12 +31,20 @@
+ *
+ *
+ * Lowmem is expanded to 2GB
++ *
++ * The following code uses the t0, t1, t2 and ra registers without
++ * previously preserving them.
++ *
+ */
+- .macro eva_entry
++ .macro platform_eva_init
++
++ .set push
++ .set reorder
+ /*
+ * Get Config.K0 value and use it to program
+ * the segmentation registers
+ */
++ mfc0 t1, CP0_CONFIG
+ andi t1, 0x7 /* CCA */
+ move t2, t1
+ ins t2, t1, 16, 3
+@@ -77,6 +86,8 @@
+ mtc0 t0, $16, 5
+ sync
+ jal mips_ihb
++
++ .set pop
+ .endm
+
+ .macro kernel_entry_setup
+@@ -95,7 +106,7 @@
+ sll t0, t0, 6 /* SC bit */
+ bgez t0, 9f
+
+- eva_entry
++ platform_eva_init
+ b 0f
+ 9:
+ /* Assume we came from YAMON... */
+@@ -127,8 +138,7 @@ nonsc_processor:
+ #ifdef CONFIG_EVA
+ sync
+ ehb
+- mfc0 t1, CP0_CONFIG
+- eva_entry
++ platform_eva_init
+ #endif
+ .endm
+
+diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
+index 7e6e682aece3..c301fa9b139f 100644
+--- a/arch/mips/include/asm/ptrace.h
++++ b/arch/mips/include/asm/ptrace.h
+@@ -23,7 +23,7 @@
+ struct pt_regs {
+ #ifdef CONFIG_32BIT
+ /* Pad bytes for argument save space on the stack. */
+- unsigned long pad0[6];
++ unsigned long pad0[8];
+ #endif
+
+ /* Saved main processor registers. */
+diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h
+index 910e71a12466..b8343ccbc989 100644
+--- a/arch/mips/include/asm/reg.h
++++ b/arch/mips/include/asm/reg.h
+@@ -12,116 +12,194 @@
+ #ifndef __ASM_MIPS_REG_H
+ #define __ASM_MIPS_REG_H
+
+-
+-#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H)
+-
+-#define EF_R0 6
+-#define EF_R1 7
+-#define EF_R2 8
+-#define EF_R3 9
+-#define EF_R4 10
+-#define EF_R5 11
+-#define EF_R6 12
+-#define EF_R7 13
+-#define EF_R8 14
+-#define EF_R9 15
+-#define EF_R10 16
+-#define EF_R11 17
+-#define EF_R12 18
+-#define EF_R13 19
+-#define EF_R14 20
+-#define EF_R15 21
+-#define EF_R16 22
+-#define EF_R17 23
+-#define EF_R18 24
+-#define EF_R19 25
+-#define EF_R20 26
+-#define EF_R21 27
+-#define EF_R22 28
+-#define EF_R23 29
+-#define EF_R24 30
+-#define EF_R25 31
++#define MIPS32_EF_R0 6
++#define MIPS32_EF_R1 7
++#define MIPS32_EF_R2 8
++#define MIPS32_EF_R3 9
++#define MIPS32_EF_R4 10
++#define MIPS32_EF_R5 11
++#define MIPS32_EF_R6 12
++#define MIPS32_EF_R7 13
++#define MIPS32_EF_R8 14
++#define MIPS32_EF_R9 15
++#define MIPS32_EF_R10 16
++#define MIPS32_EF_R11 17
++#define MIPS32_EF_R12 18
++#define MIPS32_EF_R13 19
++#define MIPS32_EF_R14 20
++#define MIPS32_EF_R15 21
++#define MIPS32_EF_R16 22
++#define MIPS32_EF_R17 23
++#define MIPS32_EF_R18 24
++#define MIPS32_EF_R19 25
++#define MIPS32_EF_R20 26
++#define MIPS32_EF_R21 27
++#define MIPS32_EF_R22 28
++#define MIPS32_EF_R23 29
++#define MIPS32_EF_R24 30
++#define MIPS32_EF_R25 31
+
+ /*
+ * k0/k1 unsaved
+ */
+-#define EF_R26 32
+-#define EF_R27 33
++#define MIPS32_EF_R26 32
++#define MIPS32_EF_R27 33
+
+-#define EF_R28 34
+-#define EF_R29 35
+-#define EF_R30 36
+-#define EF_R31 37
++#define MIPS32_EF_R28 34
++#define MIPS32_EF_R29 35
++#define MIPS32_EF_R30 36
++#define MIPS32_EF_R31 37
+
+ /*
+ * Saved special registers
+ */
+-#define EF_LO 38
+-#define EF_HI 39
+-
+-#define EF_CP0_EPC 40
+-#define EF_CP0_BADVADDR 41
+-#define EF_CP0_STATUS 42
+-#define EF_CP0_CAUSE 43
+-#define EF_UNUSED0 44
+-
+-#define EF_SIZE 180
+-
+-#endif
+-
+-#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H)
+-
+-#define EF_R0 0
+-#define EF_R1 1
+-#define EF_R2 2
+-#define EF_R3 3
+-#define EF_R4 4
+-#define EF_R5 5
+-#define EF_R6 6
+-#define EF_R7 7
+-#define EF_R8 8
+-#define EF_R9 9
+-#define EF_R10 10
+-#define EF_R11 11
+-#define EF_R12 12
+-#define EF_R13 13
+-#define EF_R14 14
+-#define EF_R15 15
+-#define EF_R16 16
+-#define EF_R17 17
+-#define EF_R18 18
+-#define EF_R19 19
+-#define EF_R20 20
+-#define EF_R21 21
+-#define EF_R22 22
+-#define EF_R23 23
+-#define EF_R24 24
+-#define EF_R25 25
++#define MIPS32_EF_LO 38
++#define MIPS32_EF_HI 39
++
++#define MIPS32_EF_CP0_EPC 40
++#define MIPS32_EF_CP0_BADVADDR 41
++#define MIPS32_EF_CP0_STATUS 42
++#define MIPS32_EF_CP0_CAUSE 43
++#define MIPS32_EF_UNUSED0 44
++
++#define MIPS32_EF_SIZE 180
++
++#define MIPS64_EF_R0 0
++#define MIPS64_EF_R1 1
++#define MIPS64_EF_R2 2
++#define MIPS64_EF_R3 3
++#define MIPS64_EF_R4 4
++#define MIPS64_EF_R5 5
++#define MIPS64_EF_R6 6
++#define MIPS64_EF_R7 7
++#define MIPS64_EF_R8 8
++#define MIPS64_EF_R9 9
++#define MIPS64_EF_R10 10
++#define MIPS64_EF_R11 11
++#define MIPS64_EF_R12 12
++#define MIPS64_EF_R13 13
++#define MIPS64_EF_R14 14
++#define MIPS64_EF_R15 15
++#define MIPS64_EF_R16 16
++#define MIPS64_EF_R17 17
++#define MIPS64_EF_R18 18
++#define MIPS64_EF_R19 19
++#define MIPS64_EF_R20 20
++#define MIPS64_EF_R21 21
++#define MIPS64_EF_R22 22
++#define MIPS64_EF_R23 23
++#define MIPS64_EF_R24 24
++#define MIPS64_EF_R25 25
+
+ /*
+ * k0/k1 unsaved
+ */
+-#define EF_R26 26
+-#define EF_R27 27
++#define MIPS64_EF_R26 26
++#define MIPS64_EF_R27 27
+
+
+-#define EF_R28 28
+-#define EF_R29 29
+-#define EF_R30 30
+-#define EF_R31 31
++#define MIPS64_EF_R28 28
++#define MIPS64_EF_R29 29
++#define MIPS64_EF_R30 30
++#define MIPS64_EF_R31 31
+
+ /*
+ * Saved special registers
+ */
+-#define EF_LO 32
+-#define EF_HI 33
+-
+-#define EF_CP0_EPC 34
+-#define EF_CP0_BADVADDR 35
+-#define EF_CP0_STATUS 36
+-#define EF_CP0_CAUSE 37
+-
+-#define EF_SIZE 304 /* size in bytes */
++#define MIPS64_EF_LO 32
++#define MIPS64_EF_HI 33
++
++#define MIPS64_EF_CP0_EPC 34
++#define MIPS64_EF_CP0_BADVADDR 35
++#define MIPS64_EF_CP0_STATUS 36
++#define MIPS64_EF_CP0_CAUSE 37
++
++#define MIPS64_EF_SIZE 304 /* size in bytes */
++
++#if defined(CONFIG_32BIT)
++
++#define EF_R0 MIPS32_EF_R0
++#define EF_R1 MIPS32_EF_R1
++#define EF_R2 MIPS32_EF_R2
++#define EF_R3 MIPS32_EF_R3
++#define EF_R4 MIPS32_EF_R4
++#define EF_R5 MIPS32_EF_R5
++#define EF_R6 MIPS32_EF_R6
++#define EF_R7 MIPS32_EF_R7
++#define EF_R8 MIPS32_EF_R8
++#define EF_R9 MIPS32_EF_R9
++#define EF_R10 MIPS32_EF_R10
++#define EF_R11 MIPS32_EF_R11
++#define EF_R12 MIPS32_EF_R12
++#define EF_R13 MIPS32_EF_R13
++#define EF_R14 MIPS32_EF_R14
++#define EF_R15 MIPS32_EF_R15
++#define EF_R16 MIPS32_EF_R16
++#define EF_R17 MIPS32_EF_R17
++#define EF_R18 MIPS32_EF_R18
++#define EF_R19 MIPS32_EF_R19
++#define EF_R20 MIPS32_EF_R20
++#define EF_R21 MIPS32_EF_R21
++#define EF_R22 MIPS32_EF_R22
++#define EF_R23 MIPS32_EF_R23
++#define EF_R24 MIPS32_EF_R24
++#define EF_R25 MIPS32_EF_R25
++#define EF_R26 MIPS32_EF_R26
++#define EF_R27 MIPS32_EF_R27
++#define EF_R28 MIPS32_EF_R28
++#define EF_R29 MIPS32_EF_R29
++#define EF_R30 MIPS32_EF_R30
++#define EF_R31 MIPS32_EF_R31
++#define EF_LO MIPS32_EF_LO
++#define EF_HI MIPS32_EF_HI
++#define EF_CP0_EPC MIPS32_EF_CP0_EPC
++#define EF_CP0_BADVADDR MIPS32_EF_CP0_BADVADDR
++#define EF_CP0_STATUS MIPS32_EF_CP0_STATUS
++#define EF_CP0_CAUSE MIPS32_EF_CP0_CAUSE
++#define EF_UNUSED0 MIPS32_EF_UNUSED0
++#define EF_SIZE MIPS32_EF_SIZE
++
++#elif defined(CONFIG_64BIT)
++
++#define EF_R0 MIPS64_EF_R0
++#define EF_R1 MIPS64_EF_R1
++#define EF_R2 MIPS64_EF_R2
++#define EF_R3 MIPS64_EF_R3
++#define EF_R4 MIPS64_EF_R4
++#define EF_R5 MIPS64_EF_R5
++#define EF_R6 MIPS64_EF_R6
++#define EF_R7 MIPS64_EF_R7
++#define EF_R8 MIPS64_EF_R8
++#define EF_R9 MIPS64_EF_R9
++#define EF_R10 MIPS64_EF_R10
++#define EF_R11 MIPS64_EF_R11
++#define EF_R12 MIPS64_EF_R12
++#define EF_R13 MIPS64_EF_R13
++#define EF_R14 MIPS64_EF_R14
++#define EF_R15 MIPS64_EF_R15
++#define EF_R16 MIPS64_EF_R16
++#define EF_R17 MIPS64_EF_R17
++#define EF_R18 MIPS64_EF_R18
++#define EF_R19 MIPS64_EF_R19
++#define EF_R20 MIPS64_EF_R20
++#define EF_R21 MIPS64_EF_R21
++#define EF_R22 MIPS64_EF_R22
++#define EF_R23 MIPS64_EF_R23
++#define EF_R24 MIPS64_EF_R24
++#define EF_R25 MIPS64_EF_R25
++#define EF_R26 MIPS64_EF_R26
++#define EF_R27 MIPS64_EF_R27
++#define EF_R28 MIPS64_EF_R28
++#define EF_R29 MIPS64_EF_R29
++#define EF_R30 MIPS64_EF_R30
++#define EF_R31 MIPS64_EF_R31
++#define EF_LO MIPS64_EF_LO
++#define EF_HI MIPS64_EF_HI
++#define EF_CP0_EPC MIPS64_EF_CP0_EPC
++#define EF_CP0_BADVADDR MIPS64_EF_CP0_BADVADDR
++#define EF_CP0_STATUS MIPS64_EF_CP0_STATUS
++#define EF_CP0_CAUSE MIPS64_EF_CP0_CAUSE
++#define EF_SIZE MIPS64_EF_SIZE
+
+ #endif /* CONFIG_64BIT */
+
+diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
+index 17960fe7a8ce..cdf68b33bd65 100644
+--- a/arch/mips/include/asm/syscall.h
++++ b/arch/mips/include/asm/syscall.h
+@@ -131,10 +131,12 @@ static inline int syscall_get_arch(void)
+ {
+ int arch = EM_MIPS;
+ #ifdef CONFIG_64BIT
+- if (!test_thread_flag(TIF_32BIT_REGS))
++ if (!test_thread_flag(TIF_32BIT_REGS)) {
+ arch |= __AUDIT_ARCH_64BIT;
+- if (test_thread_flag(TIF_32BIT_ADDR))
+- arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
++ /* N32 sets only TIF_32BIT_ADDR */
++ if (test_thread_flag(TIF_32BIT_ADDR))
++ arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
++ }
+ #endif
+ #if defined(__LITTLE_ENDIAN)
+ arch |= __AUDIT_ARCH_LE;
+diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
+index 7faf5f2bee25..71df942fb77c 100644
+--- a/arch/mips/kernel/binfmt_elfo32.c
++++ b/arch/mips/kernel/binfmt_elfo32.c
+@@ -72,12 +72,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+ #include <asm/processor.h>
+
+-/*
+- * When this file is selected, we are definitely running a 64bit kernel.
+- * So using the right regs define in asm/reg.h
+- */
+-#define WANT_COMPAT_REG_H
+-
+ /* These MUST be defined before elf.h gets included */
+ extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
+ #define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
+@@ -149,21 +143,21 @@ void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
+ {
+ int i;
+
+- for (i = 0; i < EF_R0; i++)
++ for (i = 0; i < MIPS32_EF_R0; i++)
+ grp[i] = 0;
+- grp[EF_R0] = 0;
++ grp[MIPS32_EF_R0] = 0;
+ for (i = 1; i <= 31; i++)
+- grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
+- grp[EF_R26] = 0;
+- grp[EF_R27] = 0;
+- grp[EF_LO] = (elf_greg_t) regs->lo;
+- grp[EF_HI] = (elf_greg_t) regs->hi;
+- grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
+- grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
+- grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
+- grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
+-#ifdef EF_UNUSED0
+- grp[EF_UNUSED0] = 0;
++ grp[MIPS32_EF_R0 + i] = (elf_greg_t) regs->regs[i];
++ grp[MIPS32_EF_R26] = 0;
++ grp[MIPS32_EF_R27] = 0;
++ grp[MIPS32_EF_LO] = (elf_greg_t) regs->lo;
++ grp[MIPS32_EF_HI] = (elf_greg_t) regs->hi;
++ grp[MIPS32_EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
++ grp[MIPS32_EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
++ grp[MIPS32_EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
++ grp[MIPS32_EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
++#ifdef MIPS32_EF_UNUSED0
++ grp[MIPS32_EF_UNUSED0] = 0;
+ #endif
+ }
+
+diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
+index 6f4f739dad96..e6e97d2a5c9e 100644
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -13,6 +13,7 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/asmmacro.h>
+ #include <asm/cacheops.h>
++#include <asm/eva.h>
+ #include <asm/mipsregs.h>
+ #include <asm/mipsmtregs.h>
+ #include <asm/pm.h>
+@@ -166,6 +167,9 @@ dcache_done:
+ 1: jal mips_cps_core_init
+ nop
+
++ /* Do any EVA initialization if necessary */
++ eva_init
++
+ /*
+ * Boot any other VPEs within this core that should be online, and
+ * deactivate this VPE if it should be offline.
+diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
+index 88e4c323382c..d5e59b8f4863 100644
+--- a/arch/mips/kernel/irq-gic.c
++++ b/arch/mips/kernel/irq-gic.c
+@@ -269,11 +269,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
+
+ /* Setup Intr to Pin mapping */
+ if (pin & GIC_MAP_TO_NMI_MSK) {
++ int i;
++
+ GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
+ /* FIXME: hack to route NMI to all cpu's */
+- for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
++ for (i = 0; i < NR_CPUS; i += 32) {
+ GICWRITE(GIC_REG_ADDR(SHARED,
+- GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
++ GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
+ 0xffffffff);
+ }
+ } else {
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index f639ccd5060c..aae71198b515 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -129,7 +129,7 @@ int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
+ }
+
+ __put_user(child->thread.fpu.fcr31, data + 64);
+- __put_user(current_cpu_data.fpu_id, data + 65);
++ __put_user(boot_cpu_data.fpu_id, data + 65);
+
+ return 0;
+ }
+@@ -151,6 +151,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+ }
+
+ __get_user(child->thread.fpu.fcr31, data + 64);
++ child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+
+ /* FIR may not be written. */
+
+@@ -246,36 +247,160 @@ int ptrace_set_watch_regs(struct task_struct *child,
+
+ /* regset get/set implementations */
+
+-static int gpr_get(struct task_struct *target,
+- const struct user_regset *regset,
+- unsigned int pos, unsigned int count,
+- void *kbuf, void __user *ubuf)
++#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
++
++static int gpr32_get(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
+ {
+ struct pt_regs *regs = task_pt_regs(target);
++ u32 uregs[ELF_NGREG] = {};
++ unsigned i;
++
++ for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
++ /* k0/k1 are copied as zero. */
++ if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
++ continue;
++
++ uregs[i] = regs->regs[i - MIPS32_EF_R0];
++ }
+
+- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+- regs, 0, sizeof(*regs));
++ uregs[MIPS32_EF_LO] = regs->lo;
++ uregs[MIPS32_EF_HI] = regs->hi;
++ uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
++ uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
++ uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
++ uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
++
++ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
++ sizeof(uregs));
+ }
+
+-static int gpr_set(struct task_struct *target,
+- const struct user_regset *regset,
+- unsigned int pos, unsigned int count,
+- const void *kbuf, const void __user *ubuf)
++static int gpr32_set(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
+ {
+- struct pt_regs newregs;
+- int ret;
++ struct pt_regs *regs = task_pt_regs(target);
++ u32 uregs[ELF_NGREG];
++ unsigned start, num_regs, i;
++ int err;
++
++ start = pos / sizeof(u32);
++ num_regs = count / sizeof(u32);
++
++ if (start + num_regs > ELF_NGREG)
++ return -EIO;
++
++ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
++ sizeof(uregs));
++ if (err)
++ return err;
++
++ for (i = start; i < num_regs; i++) {
++ /*
++ * Cast all values to signed here so that if this is a 64-bit
++ * kernel, the supplied 32-bit values will be sign extended.
++ */
++ switch (i) {
++ case MIPS32_EF_R1 ... MIPS32_EF_R25:
++ /* k0/k1 are ignored. */
++ case MIPS32_EF_R28 ... MIPS32_EF_R31:
++ regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
++ break;
++ case MIPS32_EF_LO:
++ regs->lo = (s32)uregs[i];
++ break;
++ case MIPS32_EF_HI:
++ regs->hi = (s32)uregs[i];
++ break;
++ case MIPS32_EF_CP0_EPC:
++ regs->cp0_epc = (s32)uregs[i];
++ break;
++ }
++ }
++
++ return 0;
++}
++
++#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
++
++#ifdef CONFIG_64BIT
++
++static int gpr64_get(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ void *kbuf, void __user *ubuf)
++{
++ struct pt_regs *regs = task_pt_regs(target);
++ u64 uregs[ELF_NGREG] = {};
++ unsigned i;
++
++ for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
++ /* k0/k1 are copied as zero. */
++ if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
++ continue;
++
++ uregs[i] = regs->regs[i - MIPS64_EF_R0];
++ }
++
++ uregs[MIPS64_EF_LO] = regs->lo;
++ uregs[MIPS64_EF_HI] = regs->hi;
++ uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
++ uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
++ uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
++ uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
++
++ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
++ sizeof(uregs));
++}
+
+- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+- &newregs,
+- 0, sizeof(newregs));
+- if (ret)
+- return ret;
++static int gpr64_set(struct task_struct *target,
++ const struct user_regset *regset,
++ unsigned int pos, unsigned int count,
++ const void *kbuf, const void __user *ubuf)
++{
++ struct pt_regs *regs = task_pt_regs(target);
++ u64 uregs[ELF_NGREG];
++ unsigned start, num_regs, i;
++ int err;
++
++ start = pos / sizeof(u64);
++ num_regs = count / sizeof(u64);
+
+- *task_pt_regs(target) = newregs;
++ if (start + num_regs > ELF_NGREG)
++ return -EIO;
++
++ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
++ sizeof(uregs));
++ if (err)
++ return err;
++
++ for (i = start; i < num_regs; i++) {
++ switch (i) {
++ case MIPS64_EF_R1 ... MIPS64_EF_R25:
++ /* k0/k1 are ignored. */
++ case MIPS64_EF_R28 ... MIPS64_EF_R31:
++ regs->regs[i - MIPS64_EF_R0] = uregs[i];
++ break;
++ case MIPS64_EF_LO:
++ regs->lo = uregs[i];
++ break;
++ case MIPS64_EF_HI:
++ regs->hi = uregs[i];
++ break;
++ case MIPS64_EF_CP0_EPC:
++ regs->cp0_epc = uregs[i];
++ break;
++ }
++ }
+
+ return 0;
+ }
+
++#endif /* CONFIG_64BIT */
++
+ static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+@@ -337,14 +462,16 @@ enum mips_regset {
+ REGSET_FPR,
+ };
+
++#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
++
+ static const struct user_regset mips_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned int),
+ .align = sizeof(unsigned int),
+- .get = gpr_get,
+- .set = gpr_set,
++ .get = gpr32_get,
++ .set = gpr32_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+@@ -364,14 +491,18 @@ static const struct user_regset_view user_mips_view = {
+ .n = ARRAY_SIZE(mips_regsets),
+ };
+
++#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
++
++#ifdef CONFIG_64BIT
++
+ static const struct user_regset mips64_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+- .get = gpr_get,
+- .set = gpr_set,
++ .get = gpr64_get,
++ .set = gpr64_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+@@ -384,25 +515,26 @@ static const struct user_regset mips64_regsets[] = {
+ };
+
+ static const struct user_regset_view user_mips64_view = {
+- .name = "mips",
++ .name = "mips64",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+- .n = ARRAY_SIZE(mips_regsets),
++ .n = ARRAY_SIZE(mips64_regsets),
+ };
+
++#endif /* CONFIG_64BIT */
++
+ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+ {
+ #ifdef CONFIG_32BIT
+ return &user_mips_view;
+-#endif
+-
++#else
+ #ifdef CONFIG_MIPS32_O32
+- if (test_thread_flag(TIF_32BIT_REGS))
+- return &user_mips_view;
++ if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
++ return &user_mips_view;
+ #endif
+-
+ return &user_mips64_view;
++#endif
+ }
+
+ long arch_ptrace(struct task_struct *child, long request,
+@@ -480,7 +612,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ break;
+ case FPC_EIR:
+ /* implementation / version register */
+- tmp = current_cpu_data.fpu_id;
++ tmp = boot_cpu_data.fpu_id;
+ break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+@@ -565,7 +697,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ break;
+ #endif
+ case FPC_CSR:
+- child->thread.fpu.fcr31 = data;
++ child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
+ break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
+index b40c3ca60ee5..a83fb730b387 100644
+--- a/arch/mips/kernel/ptrace32.c
++++ b/arch/mips/kernel/ptrace32.c
+@@ -129,7 +129,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ break;
+ case FPC_EIR:
+ /* implementation / version register */
+- tmp = current_cpu_data.fpu_id;
++ tmp = boot_cpu_data.fpu_id;
+ break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index f1343ccd7ed7..7f5feb25ae04 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -113,15 +113,19 @@ trace_a_syscall:
+ move s0, t2 # Save syscall pointer
+ move a0, sp
+ /*
+- * syscall number is in v0 unless we called syscall(__NR_###)
++ * absolute syscall number is in v0 unless we called syscall(__NR_###)
+ * where the real syscall number is in a0
+ * note: NR_syscall is the first O32 syscall but the macro is
+ * only defined when compiling with -mabi=32 (CONFIG_32BIT)
+ * therefore __NR_O32_Linux is used (4000)
+ */
+- addiu a1, v0, __NR_O32_Linux
+- bnez v0, 1f /* __NR_syscall at offset 0 */
+- lw a1, PT_R4(sp)
++ .set push
++ .set reorder
++ subu t1, v0, __NR_O32_Linux
++ move a1, v0
++ bnez t1, 1f /* __NR_syscall at offset 0 */
++ lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
++ .set pop
+
+ 1: jal syscall_trace_enter
+
+diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
+index 3babf6e4f894..21f23add04f4 100644
+--- a/arch/mips/kernel/smp-mt.c
++++ b/arch/mips/kernel/smp-mt.c
+@@ -288,6 +288,7 @@ struct plat_smp_ops vsmp_smp_ops = {
+ .prepare_cpus = vsmp_prepare_cpus,
+ };
+
++#ifdef CONFIG_PROC_FS
+ static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
+ unsigned long action_unused, void *data)
+ {
+@@ -309,3 +310,4 @@ static int __init proc_cpuinfo_notifier_init(void)
+ }
+
+ subsys_initcall(proc_cpuinfo_notifier_init);
++#endif
+diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
+index 2b3517214d6d..e11906dff885 100644
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -690,7 +690,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ case sdc1_op:
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+- BUG_ON(!is_fpu_owner());
+
+ lose_fpu(1); /* Save FPU state for the emulator. */
+ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index e80e10bafc83..343fe0f559b1 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -1299,6 +1299,7 @@ static void build_r4000_tlb_refill_handler(void)
+ }
+ #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ uasm_l_tlb_huge_update(&l, p);
++ UASM_i_LW(&p, K0, 0, K1);
+ build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+ build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ htlb_info.restore_scratch);
+diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
+index 6d9773096750..fdffc806664f 100644
+--- a/arch/mips/mti-malta/malta-memory.c
++++ b/arch/mips/mti-malta/malta-memory.c
+@@ -34,13 +34,19 @@ fw_memblock_t * __init fw_getmdesc(int eva)
+ /* otherwise look in the environment */
+
+ memsize_str = fw_getenv("memsize");
+- if (memsize_str)
+- tmp = kstrtol(memsize_str, 0, &memsize);
++ if (memsize_str) {
++ tmp = kstrtoul(memsize_str, 0, &memsize);
++ if (tmp)
++ pr_warn("Failed to read the 'memsize' env variable.\n");
++ }
+ if (eva) {
+ /* Look for ememsize for EVA */
+ ememsize_str = fw_getenv("ememsize");
+- if (ememsize_str)
+- tmp = kstrtol(ememsize_str, 0, &ememsize);
++ if (ememsize_str) {
++ tmp = kstrtoul(ememsize_str, 0, &ememsize);
++ if (tmp)
++ pr_warn("Failed to read the 'ememsize' env variable.\n");
++ }
+ }
+ if (!memsize && !ememsize) {
+ pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
+diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
+index f92b0b54e921..8dcb721d03d8 100644
+--- a/arch/powerpc/include/asm/machdep.h
++++ b/arch/powerpc/include/asm/machdep.h
+@@ -57,10 +57,10 @@ struct machdep_calls {
+ void (*hpte_removebolted)(unsigned long ea,
+ int psize, int ssize);
+ void (*flush_hash_range)(unsigned long number, int local);
+- void (*hugepage_invalidate)(struct mm_struct *mm,
++ void (*hugepage_invalidate)(unsigned long vsid,
++ unsigned long addr,
+ unsigned char *hpte_slot_array,
+- unsigned long addr, int psize);
+-
++ int psize, int ssize);
+ /* special for kexec, to be called in real mode, linear mapping is
+ * destroyed as well */
+ void (*hpte_clear_all)(void);
+diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
+index eb9261024f51..7b3d54fae46f 100644
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -413,7 +413,7 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
+ }
+
+ extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+- pmd_t *pmdp);
++ pmd_t *pmdp, unsigned long old_pmd);
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+ extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
+index d836d945068d..9ecede1e124c 100644
+--- a/arch/powerpc/include/asm/pte-hash64-64k.h
++++ b/arch/powerpc/include/asm/pte-hash64-64k.h
+@@ -46,11 +46,31 @@
+ * in order to deal with 64K made of 4K HW pages. Thus we override the
+ * generic accessors and iterators here
+ */
+-#define __real_pte(e,p) ((real_pte_t) { \
+- (e), (pte_val(e) & _PAGE_COMBO) ? \
+- (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
+-#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
+- (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
++#define __real_pte __real_pte
++static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
++{
++ real_pte_t rpte;
++
++ rpte.pte = pte;
++ rpte.hidx = 0;
++ if (pte_val(pte) & _PAGE_COMBO) {
++ /*
++ * Make sure we order the hidx load against the _PAGE_COMBO
++ * check. The store side ordering is done in __hash_page_4K
++ */
++ smp_rmb();
++ rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
++ }
++ return rpte;
++}
++
++static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
++{
++ if ((pte_val(rpte.pte) & _PAGE_COMBO))
++ return (rpte.hidx >> (index<<2)) & 0xf;
++ return (pte_val(rpte.pte) >> 12) & 0xf;
++}
++
+ #define __rpte_to_pte(r) ((r).pte)
+ #define __rpte_sub_valid(rpte, index) \
+ (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index 88e3ec6e1d96..48fb2c18fa81 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -1120,37 +1120,41 @@ EXPORT_SYMBOL_GPL(iommu_release_ownership);
+ int iommu_add_device(struct device *dev)
+ {
+ struct iommu_table *tbl;
+- int ret = 0;
+
+- if (WARN_ON(dev->iommu_group)) {
+- pr_warn("iommu_tce: device %s is already in iommu group %d, skipping\n",
+- dev_name(dev),
+- iommu_group_id(dev->iommu_group));
++ /*
++ * The sysfs entries should be populated before
++ * binding IOMMU group. If sysfs entries isn't
++ * ready, we simply bail.
++ */
++ if (!device_is_registered(dev))
++ return -ENOENT;
++
++ if (dev->iommu_group) {
++ pr_debug("%s: Skipping device %s with iommu group %d\n",
++ __func__, dev_name(dev),
++ iommu_group_id(dev->iommu_group));
+ return -EBUSY;
+ }
+
+ tbl = get_iommu_table_base(dev);
+ if (!tbl || !tbl->it_group) {
+- pr_debug("iommu_tce: skipping device %s with no tbl\n",
+- dev_name(dev));
++ pr_debug("%s: Skipping device %s with no tbl\n",
++ __func__, dev_name(dev));
+ return 0;
+ }
+
+- pr_debug("iommu_tce: adding %s to iommu group %d\n",
+- dev_name(dev), iommu_group_id(tbl->it_group));
++ pr_debug("%s: Adding %s to iommu group %d\n",
++ __func__, dev_name(dev),
++ iommu_group_id(tbl->it_group));
+
+ if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
+- pr_err("iommu_tce: unsupported iommu page size.");
+- pr_err("%s has not been added\n", dev_name(dev));
++ pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
++ __func__, IOMMU_PAGE_SIZE(tbl),
++ PAGE_SIZE, dev_name(dev));
+ return -EINVAL;
+ }
+
+- ret = iommu_group_add_device(tbl->it_group, dev);
+- if (ret < 0)
+- pr_err("iommu_tce: %s has not been added, ret=%d\n",
+- dev_name(dev), ret);
+-
+- return ret;
++ return iommu_group_add_device(tbl->it_group, dev);
+ }
+ EXPORT_SYMBOL_GPL(iommu_add_device);
+
+diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
+index cf1d325eae8b..afc0a8295f84 100644
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -412,18 +412,18 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
+ local_irq_restore(flags);
+ }
+
+-static void native_hugepage_invalidate(struct mm_struct *mm,
++static void native_hugepage_invalidate(unsigned long vsid,
++ unsigned long addr,
+ unsigned char *hpte_slot_array,
+- unsigned long addr, int psize)
++ int psize, int ssize)
+ {
+- int ssize = 0, i;
+- int lock_tlbie;
++ int i;
+ struct hash_pte *hptep;
+ int actual_psize = MMU_PAGE_16M;
+ unsigned int max_hpte_count, valid;
+ unsigned long flags, s_addr = addr;
+ unsigned long hpte_v, want_v, shift;
+- unsigned long hidx, vpn = 0, vsid, hash, slot;
++ unsigned long hidx, vpn = 0, hash, slot;
+
+ shift = mmu_psize_defs[psize].shift;
+ max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -437,15 +437,6 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+- if (!is_kernel_addr(addr)) {
+- ssize = user_segment_size(addr);
+- vsid = get_vsid(mm->context.id, addr, ssize);
+- WARN_ON(vsid == 0);
+- } else {
+- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+- ssize = mmu_kernel_ssize;
+- }
+-
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+@@ -465,22 +456,13 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
+ else
+ /* Invalidate the hpte. NOTE: this also unlocks it */
+ hptep->v = 0;
++ /*
++ * We need to do tlb invalidate for all the address, tlbie
++ * instruction compares entry_VA in tlb with the VA specified
++ * here
++ */
++ tlbie(vpn, psize, actual_psize, ssize, 0);
+ }
+- /*
+- * Since this is a hugepage, we just need a single tlbie.
+- * use the last vpn.
+- */
+- lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+- if (lock_tlbie)
+- raw_spin_lock(&native_tlbie_lock);
+-
+- asm volatile("ptesync":::"memory");
+- __tlbie(vpn, psize, actual_psize, ssize);
+- asm volatile("eieio; tlbsync; ptesync":::"memory");
+-
+- if (lock_tlbie)
+- raw_spin_unlock(&native_tlbie_lock);
+-
+ local_irq_restore(flags);
+ }
+
+diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
+index 826893fcb3a7..5f5e6328c21c 100644
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -18,6 +18,57 @@
+ #include <linux/mm.h>
+ #include <asm/machdep.h>
+
++static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
++ pmd_t *pmdp, unsigned int psize, int ssize)
++{
++ int i, max_hpte_count, valid;
++ unsigned long s_addr;
++ unsigned char *hpte_slot_array;
++ unsigned long hidx, shift, vpn, hash, slot;
++
++ s_addr = addr & HPAGE_PMD_MASK;
++ hpte_slot_array = get_hpte_slot_array(pmdp);
++ /*
++ * IF we try to do a HUGE PTE update after a withdraw is done.
++ * we will find the below NULL. This happens when we do
++ * split_huge_page_pmd
++ */
++ if (!hpte_slot_array)
++ return;
++
++ if (ppc_md.hugepage_invalidate)
++ return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
++ psize, ssize);
++ /*
++ * No bluk hpte removal support, invalidate each entry
++ */
++ shift = mmu_psize_defs[psize].shift;
++ max_hpte_count = HPAGE_PMD_SIZE >> shift;
++ for (i = 0; i < max_hpte_count; i++) {
++ /*
++ * 8 bits per each hpte entries
++ * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
++ */
++ valid = hpte_valid(hpte_slot_array, i);
++ if (!valid)
++ continue;
++ hidx = hpte_hash_index(hpte_slot_array, i);
++
++ /* get the vpn */
++ addr = s_addr + (i * (1ul << shift));
++ vpn = hpt_vpn(addr, vsid, ssize);
++ hash = hpt_hash(vpn, shift, ssize);
++ if (hidx & _PTEIDX_SECONDARY)
++ hash = ~hash;
++
++ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
++ slot += hidx & _PTEIDX_GROUP_IX;
++ ppc_md.hpte_invalidate(slot, vpn, psize,
++ MMU_PAGE_16M, ssize, 0);
++ }
++}
++
++
+ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ pmd_t *pmdp, unsigned long trap, int local, int ssize,
+ unsigned int psize)
+@@ -33,7 +84,9 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ * atomically mark the linux large page PMD busy and dirty
+ */
+ do {
+- old_pmd = pmd_val(*pmdp);
++ pmd_t pmd = ACCESS_ONCE(*pmdp);
++
++ old_pmd = pmd_val(pmd);
+ /* If PMD busy, retry the access */
+ if (unlikely(old_pmd & _PAGE_BUSY))
+ return 0;
+@@ -85,6 +138,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ vpn = hpt_vpn(ea, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ hpte_slot_array = get_hpte_slot_array(pmdp);
++ if (psize == MMU_PAGE_4K) {
++ /*
++ * invalidate the old hpte entry if we have that mapped via 64K
++ * base page size. This is because demote_segment won't flush
++ * hash page table entries.
++ */
++ if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
++ invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
++ }
+
+ valid = hpte_valid(hpte_slot_array, index);
+ if (valid) {
+@@ -107,11 +169,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ * safely update this here.
+ */
+ valid = 0;
+- new_pmd &= ~_PAGE_HPTEFLAGS;
+ hpte_slot_array[index] = 0;
+- } else
+- /* clear the busy bits and set the hash pte bits */
+- new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++ }
+ }
+
+ if (!valid) {
+@@ -119,11 +178,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+
+ /* insert new entry */
+ pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+-repeat:
+- hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+-
+- /* clear the busy bits and set the hash pte bits */
+- new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++ new_pmd |= _PAGE_HASHPTE;
+
+ /* Add in WIMG bits */
+ rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+@@ -132,6 +187,8 @@ repeat:
+ * enable the memory coherence always
+ */
+ rflags |= HPTE_R_M;
++repeat:
++ hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+ /* Insert into the hash table, primary slot */
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+@@ -172,8 +229,17 @@ repeat:
+ mark_hpte_slot_valid(hpte_slot_array, index, slot);
+ }
+ /*
+- * No need to use ldarx/stdcx here
++ * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
++ * base page size 4k.
++ */
++ if (psize == MMU_PAGE_4K)
++ new_pmd |= _PAGE_COMBO;
++ /*
++ * The hpte valid is stored in the pgtable whose address is in the
++ * second half of the PMD. Order this against clearing of the busy bit in
++ * huge pmd.
+ */
++ smp_wmb();
+ *pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
+ return 0;
+ }
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 3b181b22cd46..d3e9a78eaed3 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -611,8 +611,8 @@ static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ unmap_cpu_from_node(lcpu);
+- break;
+ ret = NOTIFY_OK;
++ break;
+ #endif
+ }
+ return ret;
+diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
+index f6ce1f111f5b..71d084b6f766 100644
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -538,7 +538,7 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
+ *pmdp = __pmd((old & ~clr) | set);
+ #endif
+ if (old & _PAGE_HASHPTE)
+- hpte_do_hugepage_flush(mm, addr, pmdp);
++ hpte_do_hugepage_flush(mm, addr, pmdp, old);
+ return old;
+ }
+
+@@ -645,7 +645,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
+ if (!(old & _PAGE_SPLITTING)) {
+ /* We need to flush the hpte */
+ if (old & _PAGE_HASHPTE)
+- hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
++ hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
+ }
+ /*
+ * This ensures that generic code that rely on IRQ disabling
+@@ -723,7 +723,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ * neesd to be flushed.
+ */
+ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+- pmd_t *pmdp)
++ pmd_t *pmdp, unsigned long old_pmd)
+ {
+ int ssize, i;
+ unsigned long s_addr;
+@@ -745,12 +745,29 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+ if (!hpte_slot_array)
+ return;
+
+- /* get the base page size */
++ /* get the base page size,vsid and segment size */
++#ifdef CONFIG_DEBUG_VM
+ psize = get_slice_psize(mm, s_addr);
++ BUG_ON(psize == MMU_PAGE_16M);
++#endif
++ if (old_pmd & _PAGE_COMBO)
++ psize = MMU_PAGE_4K;
++ else
++ psize = MMU_PAGE_64K;
++
++ if (!is_kernel_addr(s_addr)) {
++ ssize = user_segment_size(s_addr);
++ vsid = get_vsid(mm->context.id, s_addr, ssize);
++ WARN_ON(vsid == 0);
++ } else {
++ vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
++ ssize = mmu_kernel_ssize;
++ }
+
+ if (ppc_md.hugepage_invalidate)
+- return ppc_md.hugepage_invalidate(mm, hpte_slot_array,
+- s_addr, psize);
++ return ppc_md.hugepage_invalidate(vsid, s_addr,
++ hpte_slot_array,
++ psize, ssize);
+ /*
+ * No bluk hpte removal support, invalidate each entry
+ */
+@@ -768,15 +785,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+- if (!is_kernel_addr(addr)) {
+- ssize = user_segment_size(addr);
+- vsid = get_vsid(mm->context.id, addr, ssize);
+- WARN_ON(vsid == 0);
+- } else {
+- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+- ssize = mmu_kernel_ssize;
+- }
+-
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
+index c99f6510a0b2..9adda5790463 100644
+--- a/arch/powerpc/mm/tlb_hash64.c
++++ b/arch/powerpc/mm/tlb_hash64.c
+@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+ if (!(pte & _PAGE_HASHPTE))
+ continue;
+ if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+- hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
++ hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
+ else
+ hpte_need_flush(mm, start, ptep, pte, 0);
+ }
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 3136ae2f75af..dc30aa5a2ce8 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -462,7 +462,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
+- set_iommu_table_base(&pdev->dev, &pe->tce32_table);
++ set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+ }
+
+ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 7995135170a3..24abc5c223c7 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -146,7 +146,7 @@ static inline int pseries_remove_memblock(unsigned long base,
+ }
+ static inline int pseries_remove_mem_node(struct device_node *np)
+ {
+- return -EOPNOTSUPP;
++ return 0;
+ }
+ #endif /* CONFIG_MEMORY_HOTREMOVE */
+
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 33b552ffbe57..4642d6a4d356 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -721,13 +721,13 @@ static int __init disable_ddw_setup(char *str)
+
+ early_param("disable_ddw", disable_ddw_setup);
+
+-static void remove_ddw(struct device_node *np)
++static void remove_ddw(struct device_node *np, bool remove_prop)
+ {
+ struct dynamic_dma_window_prop *dwp;
+ struct property *win64;
+ const u32 *ddw_avail;
+ u64 liobn;
+- int len, ret;
++ int len, ret = 0;
+
+ ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+ win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
+@@ -761,7 +761,8 @@ static void remove_ddw(struct device_node *np)
+ np->full_name, ret, ddw_avail[2], liobn);
+
+ delprop:
+- ret = of_remove_property(np, win64);
++ if (remove_prop)
++ ret = of_remove_property(np, win64);
+ if (ret)
+ pr_warning("%s: failed to remove direct window property: %d\n",
+ np->full_name, ret);
+@@ -805,7 +806,7 @@ static int find_existing_ddw_windows(void)
+ window = kzalloc(sizeof(*window), GFP_KERNEL);
+ if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
+ kfree(window);
+- remove_ddw(pdn);
++ remove_ddw(pdn, true);
+ continue;
+ }
+
+@@ -1045,7 +1046,7 @@ out_free_window:
+ kfree(window);
+
+ out_clear_window:
+- remove_ddw(pdn);
++ remove_ddw(pdn, true);
+
+ out_free_prop:
+ kfree(win64->name);
+@@ -1255,7 +1256,14 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
+
+ switch (action) {
+ case OF_RECONFIG_DETACH_NODE:
+- remove_ddw(np);
++ /*
++ * Removing the property will invoke the reconfig
++ * notifier again, which causes dead-lock on the
++ * read-write semaphore of the notifier chain. So
++ * we have to remove the property when releasing
++ * the device node.
++ */
++ remove_ddw(np, false);
+ if (pci && pci->iommu_table)
+ iommu_free_table(pci->iommu_table, np->full_name);
+
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index b02af9ef3ff6..ccf6f162f69c 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -430,16 +430,17 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
+ spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
+ }
+
+-static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+- unsigned char *hpte_slot_array,
+- unsigned long addr, int psize)
++static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
++ unsigned long addr,
++ unsigned char *hpte_slot_array,
++ int psize, int ssize)
+ {
+- int ssize = 0, i, index = 0;
++ int i, index = 0;
+ unsigned long s_addr = addr;
+ unsigned int max_hpte_count, valid;
+ unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
+ unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
+- unsigned long shift, hidx, vpn = 0, vsid, hash, slot;
++ unsigned long shift, hidx, vpn = 0, hash, slot;
+
+ shift = mmu_psize_defs[psize].shift;
+ max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -452,15 +453,6 @@ static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+- if (!is_kernel_addr(addr)) {
+- ssize = user_segment_size(addr);
+- vsid = get_vsid(mm->context.id, addr, ssize);
+- WARN_ON(vsid == 0);
+- } else {
+- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+- ssize = mmu_kernel_ssize;
+- }
+-
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index bb63499fc5d3..9f00f9301613 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -92,6 +92,7 @@ config S390
+ select ARCH_INLINE_WRITE_UNLOCK_IRQ
+ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+ select ARCH_SAVE_PAGE_KEYS if HIBERNATION
++ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select BUILDTIME_EXTABLE_SORT
+diff --git a/arch/sh/include/asm/io_noioport.h b/arch/sh/include/asm/io_noioport.h
+index 4d48f1436a63..c727e6ddf69e 100644
+--- a/arch/sh/include/asm/io_noioport.h
++++ b/arch/sh/include/asm/io_noioport.h
+@@ -34,6 +34,17 @@ static inline void outl(unsigned int x, unsigned long port)
+ BUG();
+ }
+
++static inline void __iomem *ioport_map(unsigned long port, unsigned int size)
++{
++ BUG();
++ return NULL;
++}
++
++static inline void ioport_unmap(void __iomem *addr)
++{
++ BUG();
++}
++
+ #define inb_p(addr) inb(addr)
+ #define inw_p(addr) inw(addr)
+ #define inl_p(addr) inl(addr)
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 14695c6221c8..84ab119b6ffa 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -438,6 +438,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+ }
+
+ rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
++ if (!rq) {
++ err = -ENOMEM;
++ goto error;
++ }
++ blk_rq_set_block_pc(rq);
+
+ cmdlen = COMMAND_SIZE(opcode);
+
+@@ -491,7 +496,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+ memset(sense, 0, sizeof(sense));
+ rq->sense = sense;
+ rq->sense_len = 0;
+- blk_rq_set_block_pc(rq);
+
+ blk_execute_rq(q, disk, rq, 0);
+
+@@ -511,7 +515,8 @@ out:
+
+ error:
+ kfree(buffer);
+- blk_put_request(rq);
++ if (rq)
++ blk_put_request(rq);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
+diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
+index fe54a8c73b8c..f1ea8e56cd87 100644
+--- a/drivers/acpi/acpica/nsobject.c
++++ b/drivers/acpi/acpica/nsobject.c
+@@ -239,6 +239,17 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
+ }
+ }
+
++ /*
++ * Detach the object from any data objects (which are still held by
++ * the namespace node)
++ */
++
++ if (obj_desc->common.next_object &&
++ ((obj_desc->common.next_object)->common.type ==
++ ACPI_TYPE_LOCAL_DATA)) {
++ obj_desc->common.next_object = NULL;
++ }
++
+ /* Reset the node type to untyped */
+
+ node->type = ACPI_TYPE_ANY;
+diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
+index 270c16464dd9..ff601c0f7c7a 100644
+--- a/drivers/acpi/acpica/utcopy.c
++++ b/drivers/acpi/acpica/utcopy.c
+@@ -1001,5 +1001,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
+ status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
+ }
+
++ /* Delete the allocated object if copy failed */
++
++ if (ACPI_FAILURE(status)) {
++ acpi_ut_remove_reference(*dest_desc);
++ }
++
+ return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index a66ab658abbc..9922cc46b15c 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -197,6 +197,8 @@ static bool advance_transaction(struct acpi_ec *ec)
+ t->rdata[t->ri++] = acpi_ec_read_data(ec);
+ if (t->rlen == t->ri) {
+ t->flags |= ACPI_EC_COMMAND_COMPLETE;
++ if (t->command == ACPI_EC_COMMAND_QUERY)
++ pr_debug("hardware QR_EC completion\n");
+ wakeup = true;
+ }
+ } else
+@@ -208,7 +210,20 @@ static bool advance_transaction(struct acpi_ec *ec)
+ }
+ return wakeup;
+ } else {
+- if ((status & ACPI_EC_FLAG_IBF) == 0) {
++ /*
++ * There is firmware refusing to respond QR_EC when SCI_EVT
++ * is not set, for which case, we complete the QR_EC
++ * without issuing it to the firmware.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=86211
++ */
++ if (!(status & ACPI_EC_FLAG_SCI) &&
++ (t->command == ACPI_EC_COMMAND_QUERY)) {
++ t->flags |= ACPI_EC_COMMAND_POLL;
++ t->rdata[t->ri++] = 0x00;
++ t->flags |= ACPI_EC_COMMAND_COMPLETE;
++ pr_debug("software QR_EC completion\n");
++ wakeup = true;
++ } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
+ acpi_ec_write_cmd(ec, t->command);
+ t->flags |= ACPI_EC_COMMAND_POLL;
+ } else
+@@ -288,11 +303,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
+ /* following two actions should be kept atomic */
+ ec->curr = t;
+ start_transaction(ec);
+- if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
+- clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+ spin_unlock_irqrestore(&ec->lock, tmp);
+ ret = ec_poll(ec);
+ spin_lock_irqsave(&ec->lock, tmp);
++ if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
++ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+ ec->curr = NULL;
+ spin_unlock_irqrestore(&ec->lock, tmp);
+ return ret;
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 3dca36d4ad26..17f9ec501972 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1071,9 +1071,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+
+ if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
+
+- cpuidle_pause_and_lock();
+ /* Protect against cpu-hotplug */
+ get_online_cpus();
++ cpuidle_pause_and_lock();
+
+ /* Disable all cpuidle devices */
+ for_each_online_cpu(cpu) {
+@@ -1100,8 +1100,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+ cpuidle_enable_device(dev);
+ }
+ }
+- put_online_cpus();
+ cpuidle_resume_and_unlock();
++ put_online_cpus();
+ }
+
+ return 0;
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index f775fa0d850f..551f29127369 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -351,7 +351,8 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
+ unsigned long long sta;
+ acpi_status status;
+
+- if (device->handler->hotplug.demand_offline && !acpi_force_hot_remove) {
++ if (device->handler && device->handler->hotplug.demand_offline
++ && !acpi_force_hot_remove) {
+ if (!acpi_scan_is_offline(device, true))
+ return -EBUSY;
+ } else {
+@@ -664,8 +665,14 @@ static ssize_t
+ acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
++ acpi_status status;
++ unsigned long long sun;
++
++ status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
++ if (ACPI_FAILURE(status))
++ return -ENODEV;
+
+- return sprintf(buf, "%lu\n", acpi_dev->pnp.sun);
++ return sprintf(buf, "%llu\n", sun);
+ }
+ static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+
+@@ -687,7 +694,6 @@ static int acpi_device_setup_files(struct acpi_device *dev)
+ {
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+- unsigned long long sun;
+ int result = 0;
+
+ /*
+@@ -728,14 +734,10 @@ static int acpi_device_setup_files(struct acpi_device *dev)
+ if (dev->pnp.unique_id)
+ result = device_create_file(&dev->dev, &dev_attr_uid);
+
+- status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun);
+- if (ACPI_SUCCESS(status)) {
+- dev->pnp.sun = (unsigned long)sun;
++ if (acpi_has_method(dev->handle, "_SUN")) {
+ result = device_create_file(&dev->dev, &dev_attr_sun);
+ if (result)
+ goto end;
+- } else {
+- dev->pnp.sun = (unsigned long)-1;
+ }
+
+ if (acpi_has_method(dev->handle, "_STA")) {
+@@ -919,12 +921,17 @@ static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
+ device->driver->ops.notify(device, event);
+ }
+
+-static acpi_status acpi_device_notify_fixed(void *data)
++static void acpi_device_notify_fixed(void *data)
+ {
+ struct acpi_device *device = data;
+
+ /* Fixed hardware devices have no handles */
+ acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
++}
++
++static acpi_status acpi_device_fixed_event(void *data)
++{
++ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
+ return AE_OK;
+ }
+
+@@ -935,12 +942,12 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+ status =
+ acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+- acpi_device_notify_fixed,
++ acpi_device_fixed_event,
+ device);
+ else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+ status =
+ acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+- acpi_device_notify_fixed,
++ acpi_device_fixed_event,
+ device);
+ else
+ status = acpi_install_notify_handler(device->handle,
+@@ -957,10 +964,10 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
+ {
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+ acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+- acpi_device_notify_fixed);
++ acpi_device_fixed_event);
+ else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+ acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+- acpi_device_notify_fixed);
++ acpi_device_fixed_event);
+ else
+ acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_device_notify);
+@@ -972,7 +979,7 @@ static int acpi_device_probe(struct device *dev)
+ struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
+ int ret;
+
+- if (acpi_dev->handler)
++ if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
+ return -EINVAL;
+
+ if (!acpi_drv->ops.add)
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index 350d52a8f781..4834b4cae540 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -82,9 +82,9 @@ module_param(allow_duplicates, bool, 0644);
+ * For Windows 8 systems: used to decide if video module
+ * should skip registering backlight interface of its own.
+ */
+-static int use_native_backlight_param = 1;
++static int use_native_backlight_param = -1;
+ module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
+-static bool use_native_backlight_dmi = false;
++static bool use_native_backlight_dmi = true;
+
+ static int register_count;
+ static struct mutex video_list_lock;
+@@ -415,6 +415,12 @@ static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
+ return 0;
+ }
+
++static int __init video_disable_native_backlight(const struct dmi_system_id *d)
++{
++ use_native_backlight_dmi = false;
++ return 0;
++}
++
+ static struct dmi_system_id video_dmi_table[] __initdata = {
+ /*
+ * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
+@@ -645,6 +651,41 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
+ },
+ },
++
++ /*
++ * These models have a working acpi_video backlight control, and using
++ * native backlight causes a regression where backlight does not work
++ * when userspace is not handling brightness key events. Disable
++ * native_backlight on these to fix this:
++ * https://bugzilla.kernel.org/show_bug.cgi?id=81691
++ */
++ {
++ .callback = video_disable_native_backlight,
++ .ident = "ThinkPad T420",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"),
++ },
++ },
++ {
++ .callback = video_disable_native_backlight,
++ .ident = "ThinkPad T520",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"),
++ },
++ },
++
++ /* The native backlight controls do not work on some older machines */
++ {
++ /* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */
++ .callback = video_disable_native_backlight,
++ .ident = "HP ENVY 15 Notebook",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index b2c98c1bc037..9dc02c429771 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -42,6 +42,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/slab.h>
+ #include <linux/idr.h>
++#include <linux/workqueue.h>
+
+ #include "rbd_types.h"
+
+@@ -332,7 +333,10 @@ struct rbd_device {
+
+ char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
+
++ struct list_head rq_queue; /* incoming rq queue */
+ spinlock_t lock; /* queue, flags, open_count */
++ struct workqueue_struct *rq_wq;
++ struct work_struct rq_work;
+
+ struct rbd_image_header header;
+ unsigned long flags; /* possibly lock protected */
+@@ -3183,102 +3187,129 @@ out:
+ return ret;
+ }
+
+-static void rbd_request_fn(struct request_queue *q)
+- __releases(q->queue_lock) __acquires(q->queue_lock)
++static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
+ {
+- struct rbd_device *rbd_dev = q->queuedata;
+- struct request *rq;
++ struct rbd_img_request *img_request;
++ u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
++ u64 length = blk_rq_bytes(rq);
++ bool wr = rq_data_dir(rq) == WRITE;
+ int result;
+
+- while ((rq = blk_fetch_request(q))) {
+- bool write_request = rq_data_dir(rq) == WRITE;
+- struct rbd_img_request *img_request;
+- u64 offset;
+- u64 length;
++ /* Ignore/skip any zero-length requests */
+
+- /* Ignore any non-FS requests that filter through. */
++ if (!length) {
++ dout("%s: zero-length request\n", __func__);
++ result = 0;
++ goto err_rq;
++ }
+
+- if (rq->cmd_type != REQ_TYPE_FS) {
+- dout("%s: non-fs request type %d\n", __func__,
+- (int) rq->cmd_type);
+- __blk_end_request_all(rq, 0);
+- continue;
++ /* Disallow writes to a read-only device */
++
++ if (wr) {
++ if (rbd_dev->mapping.read_only) {
++ result = -EROFS;
++ goto err_rq;
+ }
++ rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
++ }
+
+- /* Ignore/skip any zero-length requests */
++ /*
++ * Quit early if the mapped snapshot no longer exists. It's
++ * still possible the snapshot will have disappeared by the
++ * time our request arrives at the osd, but there's no sense in
++ * sending it if we already know.
++ */
++ if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
++ dout("request for non-existent snapshot");
++ rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
++ result = -ENXIO;
++ goto err_rq;
++ }
+
+- offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
+- length = (u64) blk_rq_bytes(rq);
++ if (offset && length > U64_MAX - offset + 1) {
++ rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
++ length);
++ result = -EINVAL;
++ goto err_rq; /* Shouldn't happen */
++ }
+
+- if (!length) {
+- dout("%s: zero-length request\n", __func__);
+- __blk_end_request_all(rq, 0);
+- continue;
+- }
++ if (offset + length > rbd_dev->mapping.size) {
++ rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
++ length, rbd_dev->mapping.size);
++ result = -EIO;
++ goto err_rq;
++ }
+
+- spin_unlock_irq(q->queue_lock);
++ img_request = rbd_img_request_create(rbd_dev, offset, length, wr);
++ if (!img_request) {
++ result = -ENOMEM;
++ goto err_rq;
++ }
++ img_request->rq = rq;
+
+- /* Disallow writes to a read-only device */
++ result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, rq->bio);
++ if (result)
++ goto err_img_request;
+
+- if (write_request) {
+- result = -EROFS;
+- if (rbd_dev->mapping.read_only)
+- goto end_request;
+- rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
+- }
++ result = rbd_img_request_submit(img_request);
++ if (result)
++ goto err_img_request;
+
+- /*
+- * Quit early if the mapped snapshot no longer
+- * exists. It's still possible the snapshot will
+- * have disappeared by the time our request arrives
+- * at the osd, but there's no sense in sending it if
+- * we already know.
+- */
+- if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
+- dout("request for non-existent snapshot");
+- rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
+- result = -ENXIO;
+- goto end_request;
+- }
++ return;
+
+- result = -EINVAL;
+- if (offset && length > U64_MAX - offset + 1) {
+- rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
+- offset, length);
+- goto end_request; /* Shouldn't happen */
+- }
++err_img_request:
++ rbd_img_request_put(img_request);
++err_rq:
++ if (result)
++ rbd_warn(rbd_dev, "%s %llx at %llx result %d",
++ wr ? "write" : "read", length, offset, result);
++ blk_end_request_all(rq, result);
++}
+
+- result = -EIO;
+- if (offset + length > rbd_dev->mapping.size) {
+- rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
+- offset, length, rbd_dev->mapping.size);
+- goto end_request;
+- }
++static void rbd_request_workfn(struct work_struct *work)
++{
++ struct rbd_device *rbd_dev =
++ container_of(work, struct rbd_device, rq_work);
++ struct request *rq, *next;
++ LIST_HEAD(requests);
+
+- result = -ENOMEM;
+- img_request = rbd_img_request_create(rbd_dev, offset, length,
+- write_request);
+- if (!img_request)
+- goto end_request;
++ spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */
++ list_splice_init(&rbd_dev->rq_queue, &requests);
++ spin_unlock_irq(&rbd_dev->lock);
+
+- img_request->rq = rq;
++ list_for_each_entry_safe(rq, next, &requests, queuelist) {
++ list_del_init(&rq->queuelist);
++ rbd_handle_request(rbd_dev, rq);
++ }
++}
+
+- result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
+- rq->bio);
+- if (!result)
+- result = rbd_img_request_submit(img_request);
+- if (result)
+- rbd_img_request_put(img_request);
+-end_request:
+- spin_lock_irq(q->queue_lock);
+- if (result < 0) {
+- rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
+- write_request ? "write" : "read",
+- length, offset, result);
+-
+- __blk_end_request_all(rq, result);
++/*
++ * Called with q->queue_lock held and interrupts disabled, possibly on
++ * the way to schedule(). Do not sleep here!
++ */
++static void rbd_request_fn(struct request_queue *q)
++{
++ struct rbd_device *rbd_dev = q->queuedata;
++ struct request *rq;
++ int queued = 0;
++
++ rbd_assert(rbd_dev);
++
++ while ((rq = blk_fetch_request(q))) {
++ /* Ignore any non-FS requests that filter through. */
++ if (rq->cmd_type != REQ_TYPE_FS) {
++ dout("%s: non-fs request type %d\n", __func__,
++ (int) rq->cmd_type);
++ __blk_end_request_all(rq, 0);
++ continue;
+ }
++
++ list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);
++ queued++;
+ }
++
++ if (queued)
++ queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
+ }
+
+ /*
+@@ -3848,6 +3879,8 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+ return NULL;
+
+ spin_lock_init(&rbd_dev->lock);
++ INIT_LIST_HEAD(&rbd_dev->rq_queue);
++ INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn);
+ rbd_dev->flags = 0;
+ atomic_set(&rbd_dev->parent_ref, 0);
+ INIT_LIST_HEAD(&rbd_dev->node);
+@@ -5066,12 +5099,17 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
+ ret = rbd_dev_mapping_set(rbd_dev);
+ if (ret)
+ goto err_out_disk;
++
+ set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
+ set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
+
++ rbd_dev->rq_wq = alloc_workqueue(rbd_dev->disk->disk_name, 0, 0);
++ if (!rbd_dev->rq_wq)
++ goto err_out_mapping;
++
+ ret = rbd_bus_add_dev(rbd_dev);
+ if (ret)
+- goto err_out_mapping;
++ goto err_out_workqueue;
+
+ /* Everything's ready. Announce the disk to the world. */
+
+@@ -5083,6 +5121,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
+
+ return ret;
+
++err_out_workqueue:
++ destroy_workqueue(rbd_dev->rq_wq);
++ rbd_dev->rq_wq = NULL;
+ err_out_mapping:
+ rbd_dev_mapping_clear(rbd_dev);
+ err_out_disk:
+@@ -5314,6 +5355,7 @@ static void rbd_dev_device_release(struct device *dev)
+ {
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
++ destroy_workqueue(rbd_dev->rq_wq);
+ rbd_free_disk(rbd_dev);
+ clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
+ rbd_dev_mapping_clear(rbd_dev);
+diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
+index dc79f88f8717..54d9f2e73495 100644
+--- a/drivers/bluetooth/btmrvl_drv.h
++++ b/drivers/bluetooth/btmrvl_drv.h
+@@ -68,6 +68,7 @@ struct btmrvl_adapter {
+ u8 hs_state;
+ u8 wakeup_tries;
+ wait_queue_head_t cmd_wait_q;
++ wait_queue_head_t event_hs_wait_q;
+ u8 cmd_complete;
+ bool is_suspended;
+ };
+diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
+index e9dbddb0b8f1..3ecba5c979bd 100644
+--- a/drivers/bluetooth/btmrvl_main.c
++++ b/drivers/bluetooth/btmrvl_main.c
+@@ -114,6 +114,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
+ adapter->hs_state = HS_ACTIVATED;
+ if (adapter->psmode)
+ adapter->ps_state = PS_SLEEP;
++ wake_up_interruptible(&adapter->event_hs_wait_q);
+ BT_DBG("HS ACTIVATED!");
+ } else {
+ BT_DBG("HS Enable failed");
+@@ -253,11 +254,31 @@ EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
+
+ int btmrvl_enable_hs(struct btmrvl_private *priv)
+ {
++ struct btmrvl_adapter *adapter = priv->adapter;
+ int ret;
+
+ ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
+- if (ret)
++ if (ret) {
+ BT_ERR("Host sleep enable command failed\n");
++ return ret;
++ }
++
++ ret = wait_event_interruptible_timeout(adapter->event_hs_wait_q,
++ adapter->hs_state,
++ msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED));
++ if (ret < 0) {
++ BT_ERR("event_hs_wait_q terminated (%d): %d,%d,%d",
++ ret, adapter->hs_state, adapter->ps_state,
++ adapter->wakeup_tries);
++ } else if (!ret) {
++ BT_ERR("hs_enable timeout: %d,%d,%d", adapter->hs_state,
++ adapter->ps_state, adapter->wakeup_tries);
++ ret = -ETIMEDOUT;
++ } else {
++ BT_DBG("host sleep enabled: %d,%d,%d", adapter->hs_state,
++ adapter->ps_state, adapter->wakeup_tries);
++ ret = 0;
++ }
+
+ return ret;
+ }
+@@ -358,6 +379,7 @@ static void btmrvl_init_adapter(struct btmrvl_private *priv)
+ }
+
+ init_waitqueue_head(&priv->adapter->cmd_wait_q);
++ init_waitqueue_head(&priv->adapter->event_hs_wait_q);
+ }
+
+ static void btmrvl_free_adapter(struct btmrvl_private *priv)
+@@ -666,6 +688,7 @@ int btmrvl_remove_card(struct btmrvl_private *priv)
+ hdev = priv->btmrvl_dev.hcidev;
+
+ wake_up_interruptible(&priv->adapter->cmd_wait_q);
++ wake_up_interruptible(&priv->adapter->event_hs_wait_q);
+
+ kthread_stop(priv->main_thread.task);
+
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 62e10fd1e1cb..6af17002a115 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -491,11 +491,10 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
+ int tpm_get_timeouts(struct tpm_chip *chip)
+ {
+ struct tpm_cmd_t tpm_cmd;
+- struct timeout_t *timeout_cap;
++ unsigned long new_timeout[4];
++ unsigned long old_timeout[4];
+ struct duration_t *duration_cap;
+ ssize_t rc;
+- u32 timeout;
+- unsigned int scale = 1;
+
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+@@ -529,25 +528,46 @@ int tpm_get_timeouts(struct tpm_chip *chip)
+ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
+ return -EINVAL;
+
+- timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
+- /* Don't overwrite default if value is 0 */
+- timeout = be32_to_cpu(timeout_cap->a);
+- if (timeout && timeout < 1000) {
+- /* timeouts in msec rather usec */
+- scale = 1000;
+- chip->vendor.timeout_adjusted = true;
++ old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
++ old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
++ old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
++ old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
++ memcpy(new_timeout, old_timeout, sizeof(new_timeout));
++
++ /*
++ * Provide ability for vendor overrides of timeout values in case
++ * of misreporting.
++ */
++ if (chip->ops->update_timeouts != NULL)
++ chip->vendor.timeout_adjusted =
++ chip->ops->update_timeouts(chip, new_timeout);
++
++ if (!chip->vendor.timeout_adjusted) {
++ /* Don't overwrite default if value is 0 */
++ if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
++ int i;
++
++ /* timeouts in msec rather usec */
++ for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
++ new_timeout[i] *= 1000;
++ chip->vendor.timeout_adjusted = true;
++ }
++ }
++
++ /* Report adjusted timeouts */
++ if (chip->vendor.timeout_adjusted) {
++ dev_info(chip->dev,
++ HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
++ old_timeout[0], new_timeout[0],
++ old_timeout[1], new_timeout[1],
++ old_timeout[2], new_timeout[2],
++ old_timeout[3], new_timeout[3]);
+ }
+- if (timeout)
+- chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
+- timeout = be32_to_cpu(timeout_cap->b);
+- if (timeout)
+- chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
+- timeout = be32_to_cpu(timeout_cap->c);
+- if (timeout)
+- chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
+- timeout = be32_to_cpu(timeout_cap->d);
+- if (timeout)
+- chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
++
++ chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]);
++ chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]);
++ chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]);
++ chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]);
+
+ duration:
+ tpm_cmd.header.in = tpm_getcap_header;
+@@ -991,13 +1011,13 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+ int err, total = 0, retries = 5;
+ u8 *dest = out;
+
++ if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
++ return -EINVAL;
++
+ chip = tpm_chip_find_get(chip_num);
+ if (chip == NULL)
+ return -ENODEV;
+
+- if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
+- return -EINVAL;
+-
+ do {
+ tpm_cmd.header.in = tpm_getrandom_header;
+ tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
+@@ -1016,6 +1036,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+ num_bytes -= recd;
+ } while (retries-- && total < max);
+
++ tpm_chip_put(chip);
+ return total ? total : -EIO;
+ }
+ EXPORT_SYMBOL_GPL(tpm_get_random);
+@@ -1095,7 +1116,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
+ goto del_misc;
+
+ if (tpm_add_ppi(&dev->kobj))
+- goto del_misc;
++ goto del_sysfs;
+
+ chip->bios_dir = tpm_bios_log_setup(chip->devname);
+
+@@ -1106,6 +1127,8 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
+
+ return chip;
+
++del_sysfs:
++ tpm_sysfs_del_device(chip);
+ del_misc:
+ tpm_dev_del_device(chip);
+ put_device:
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index a9ed2270c25d..2c46734b266d 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -373,6 +373,36 @@ out_err:
+ return rc;
+ }
+
++struct tis_vendor_timeout_override {
++ u32 did_vid;
++ unsigned long timeout_us[4];
++};
++
++static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
++ /* Atmel 3204 */
++ { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
++ (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
++};
++
++static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
++ unsigned long *timeout_cap)
++{
++ int i;
++ u32 did_vid;
++
++ did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
++
++ for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
++ if (vendor_timeout_overrides[i].did_vid != did_vid)
++ continue;
++ memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
++ sizeof(vendor_timeout_overrides[i].timeout_us));
++ return true;
++ }
++
++ return false;
++}
++
+ /*
+ * Early probing for iTPM with STS_DATA_EXPECT flaw.
+ * Try sending command without itpm flag set and if that
+@@ -437,6 +467,7 @@ static const struct tpm_class_ops tpm_tis = {
+ .recv = tpm_tis_recv,
+ .send = tpm_tis_send,
+ .cancel = tpm_tis_ready,
++ .update_timeouts = tpm_tis_update_timeouts,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = tpm_tis_req_canceled,
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index bb1d08dc8cc8..379c0837f5a9 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -28,6 +28,7 @@
+ #include <linux/of.h>
+
+ #include <asm/cputhreads.h>
++#include <asm/firmware.h>
+ #include <asm/reg.h>
+ #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
+
+@@ -98,7 +99,11 @@ static int init_powernv_pstates(void)
+ return -ENODEV;
+ }
+
+- WARN_ON(len_ids != len_freqs);
++ if (len_ids != len_freqs) {
++ pr_warn("Entries in ibm,pstate-ids and "
++ "ibm,pstate-frequencies-mhz does not match\n");
++ }
++
+ nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
+ if (!nr_pstates) {
+ pr_warn("No PStates found\n");
+@@ -131,7 +136,12 @@ static unsigned int pstate_id_to_freq(int pstate_id)
+ int i;
+
+ i = powernv_pstate_info.max - pstate_id;
+- BUG_ON(i >= powernv_pstate_info.nr_pstates || i < 0);
++ if (i >= powernv_pstate_info.nr_pstates || i < 0) {
++ pr_warn("PState id %d outside of PState table, "
++ "reporting nominal id %d instead\n",
++ pstate_id, powernv_pstate_info.nominal);
++ i = powernv_pstate_info.max - powernv_pstate_info.nominal;
++ }
+
+ return powernv_freqs[i].frequency;
+ }
+@@ -321,6 +331,10 @@ static int __init powernv_cpufreq_init(void)
+ {
+ int rc = 0;
+
++ /* Don't probe on pseries (guest) platforms */
++ if (!firmware_has_feature(FW_FEATURE_OPALv3))
++ return -ENODEV;
++
+ /* Discover pstates from device tree and init */
+ rc = init_powernv_pstates();
+ if (rc) {
+diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
+index 74f5788d50b1..a64be578dab2 100644
+--- a/drivers/cpuidle/cpuidle-powernv.c
++++ b/drivers/cpuidle/cpuidle-powernv.c
+@@ -160,10 +160,10 @@ static int powernv_cpuidle_driver_init(void)
+ static int powernv_add_idle_states(void)
+ {
+ struct device_node *power_mgt;
+- struct property *prop;
+ int nr_idle_states = 1; /* Snooze */
+ int dt_idle_states;
+- u32 *flags;
++ const __be32 *idle_state_flags;
++ u32 len_flags, flags;
+ int i;
+
+ /* Currently we have snooze statically defined */
+@@ -174,18 +174,18 @@ static int powernv_add_idle_states(void)
+ return nr_idle_states;
+ }
+
+- prop = of_find_property(power_mgt, "ibm,cpu-idle-state-flags", NULL);
+- if (!prop) {
++ idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags);
++ if (!idle_state_flags) {
+ pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
+ return nr_idle_states;
+ }
+
+- dt_idle_states = prop->length / sizeof(u32);
+- flags = (u32 *) prop->value;
++ dt_idle_states = len_flags / sizeof(u32);
+
+ for (i = 0; i < dt_idle_states; i++) {
+
+- if (flags[i] & IDLE_USE_INST_NAP) {
++ flags = be32_to_cpu(idle_state_flags[i]);
++ if (flags & IDLE_USE_INST_NAP) {
+ /* Add NAP state */
+ strcpy(powernv_states[nr_idle_states].name, "Nap");
+ strcpy(powernv_states[nr_idle_states].desc, "Nap");
+@@ -196,7 +196,7 @@ static int powernv_add_idle_states(void)
+ nr_idle_states++;
+ }
+
+- if (flags[i] & IDLE_USE_INST_SLEEP) {
++ if (flags & IDLE_USE_INST_SLEEP) {
+ /* Add FASTSLEEP state */
+ strcpy(powernv_states[nr_idle_states].name, "FastSleep");
+ strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index f0a43646a2f3..5abe943e3404 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
+ */
+ static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
+ {
+- WARN_ON(!spin_is_locked(&__efivars->lock));
++ lockdep_assert_held(&__efivars->lock);
+
+ list_del(&entry->list);
+ spin_unlock_irq(&__efivars->lock);
+@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
+ const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+
+- WARN_ON(!spin_is_locked(&__efivars->lock));
++ lockdep_assert_held(&__efivars->lock);
+
+ status = ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ int strsize1, strsize2;
+ bool found = false;
+
+- WARN_ON(!spin_is_locked(&__efivars->lock));
++ lockdep_assert_held(&__efivars->lock);
+
+ list_for_each_entry_safe(entry, n, head, list) {
+ strsize1 = ucs2_strsize(name, 1024);
+@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+
+- WARN_ON(!spin_is_locked(&__efivars->lock));
++ lockdep_assert_held(&__efivars->lock);
+
+ status = ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index 47ad74255bf1..dd469dbeaae1 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -404,6 +404,11 @@ nouveau_display_fini(struct drm_device *dev)
+ {
+ struct nouveau_display *disp = nouveau_display(dev);
+ struct drm_connector *connector;
++ int head;
++
++ /* Make sure that drm and hw vblank irqs get properly disabled. */
++ for (head = 0; head < dev->mode_config.num_crtc; head++)
++ drm_vblank_off(dev, head);
+
+ /* disable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+@@ -620,6 +625,8 @@ void
+ nouveau_display_resume(struct drm_device *dev)
+ {
+ struct drm_crtc *crtc;
++ int head;
++
+ nouveau_display_init(dev);
+
+ /* Force CLUT to get re-loaded during modeset */
+@@ -629,6 +636,10 @@ nouveau_display_resume(struct drm_device *dev)
+ nv_crtc->lut.depth = 0;
+ }
+
++ /* Make sure that drm and hw vblank irqs get resumed if needed. */
++ for (head = 0; head < dev->mode_config.num_crtc; head++)
++ drm_vblank_on(dev, head);
++
+ drm_helper_resume_force_mode(dev);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
+index 7efbafaf7c1d..b628addcdf69 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
+@@ -10,7 +10,7 @@
+
+ #define DRIVER_MAJOR 1
+ #define DRIVER_MINOR 1
+-#define DRIVER_PATCHLEVEL 1
++#define DRIVER_PATCHLEVEL 2
+
+ /*
+ * 1.1.1:
+@@ -21,6 +21,8 @@
+ * to control registers on the MPs to enable performance counters,
+ * and to control the warp error enable mask (OpenGL requires out of
+ * bounds access to local memory to be silently ignored / return 0).
++ * 1.1.2:
++ * - fixes multiple bugs in flip completion events and timestamping
+ */
+
+ #include <core/client.h>
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 767f2cc44bd8..65a8cca603a4 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -7901,6 +7901,7 @@ restart_ih:
+ static int cik_startup(struct radeon_device *rdev)
+ {
+ struct radeon_ring *ring;
++ u32 nop;
+ int r;
+
+ /* enable pcie gen2/3 link */
+@@ -8034,9 +8035,15 @@ static int cik_startup(struct radeon_device *rdev)
+ }
+ cik_irq_set(rdev);
+
++ if (rdev->family == CHIP_HAWAII) {
++ nop = RADEON_CP_PACKET2;
++ } else {
++ nop = PACKET3(PACKET3_NOP, 0x3FFF);
++ }
++
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+- PACKET3(PACKET3_NOP, 0x3FFF));
++ nop);
+ if (r)
+ return r;
+
+@@ -8044,7 +8051,7 @@ static int cik_startup(struct radeon_device *rdev)
+ /* type-2 packets are deprecated on MEC, use type-3 instead */
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
+- PACKET3(PACKET3_NOP, 0x3FFF));
++ nop);
+ if (r)
+ return r;
+ ring->me = 1; /* first MEC */
+@@ -8055,7 +8062,7 @@ static int cik_startup(struct radeon_device *rdev)
+ /* type-2 packets are deprecated on MEC, use type-3 instead */
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
+- PACKET3(PACKET3_NOP, 0x3FFF));
++ nop);
+ if (r)
+ return r;
+ /* dGPU only have 1 MEC */
+diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
+index 3d2e489ab732..ff9163dc1596 100644
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -46,6 +46,7 @@
+ #include <linux/completion.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/sysctl.h>
+
+ #include <rdma/iw_cm.h>
+ #include <rdma/ib_addr.h>
+@@ -65,6 +66,20 @@ struct iwcm_work {
+ struct list_head free_list;
+ };
+
++static unsigned int default_backlog = 256;
++
++static struct ctl_table_header *iwcm_ctl_table_hdr;
++static struct ctl_table iwcm_ctl_table[] = {
++ {
++ .procname = "default_backlog",
++ .data = &default_backlog,
++ .maxlen = sizeof(default_backlog),
++ .mode = 0644,
++ .proc_handler = proc_dointvec,
++ },
++ { }
++};
++
+ /*
+ * The following services provide a mechanism for pre-allocating iwcm_work
+ * elements. The design pre-allocates them based on the cm_id type:
+@@ -425,6 +440,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
++ if (!backlog)
++ backlog = default_backlog;
++
+ ret = alloc_work_entries(cm_id_priv, backlog);
+ if (ret)
+ return ret;
+@@ -1030,11 +1048,20 @@ static int __init iw_cm_init(void)
+ if (!iwcm_wq)
+ return -ENOMEM;
+
++ iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
++ iwcm_ctl_table);
++ if (!iwcm_ctl_table_hdr) {
++ pr_err("iw_cm: couldn't register sysctl paths\n");
++ destroy_workqueue(iwcm_wq);
++ return -ENOMEM;
++ }
++
+ return 0;
+ }
+
+ static void __exit iw_cm_cleanup(void)
+ {
++ unregister_net_sysctl_table(iwcm_ctl_table_hdr);
+ destroy_workqueue(iwcm_wq);
+ }
+
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index e3c2c5b4297f..767000811cf9 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -130,6 +130,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
+ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+
+ static struct scsi_transport_template *ib_srp_transport_template;
++static struct workqueue_struct *srp_remove_wq;
+
+ static struct ib_client srp_client = {
+ .name = "srp",
+@@ -731,7 +732,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
+ spin_unlock_irq(&target->lock);
+
+ if (changed)
+- queue_work(system_long_wq, &target->remove_work);
++ queue_work(srp_remove_wq, &target->remove_work);
+
+ return changed;
+ }
+@@ -3261,9 +3262,10 @@ static void srp_remove_one(struct ib_device *device)
+ spin_unlock(&host->target_lock);
+
+ /*
+- * Wait for target port removal tasks.
++ * Wait for tl_err and target port removal tasks.
+ */
+ flush_workqueue(system_long_wq);
++ flush_workqueue(srp_remove_wq);
+
+ kfree(host);
+ }
+@@ -3313,16 +3315,22 @@ static int __init srp_init_module(void)
+ indirect_sg_entries = cmd_sg_entries;
+ }
+
++ srp_remove_wq = create_workqueue("srp_remove");
++ if (IS_ERR(srp_remove_wq)) {
++ ret = PTR_ERR(srp_remove_wq);
++ goto out;
++ }
++
++ ret = -ENOMEM;
+ ib_srp_transport_template =
+ srp_attach_transport(&ib_srp_transport_functions);
+ if (!ib_srp_transport_template)
+- return -ENOMEM;
++ goto destroy_wq;
+
+ ret = class_register(&srp_class);
+ if (ret) {
+ pr_err("couldn't register class infiniband_srp\n");
+- srp_release_transport(ib_srp_transport_template);
+- return ret;
++ goto release_tr;
+ }
+
+ ib_sa_register_client(&srp_sa_client);
+@@ -3330,13 +3338,22 @@ static int __init srp_init_module(void)
+ ret = ib_register_client(&srp_client);
+ if (ret) {
+ pr_err("couldn't register IB client\n");
+- srp_release_transport(ib_srp_transport_template);
+- ib_sa_unregister_client(&srp_sa_client);
+- class_unregister(&srp_class);
+- return ret;
++ goto unreg_sa;
+ }
+
+- return 0;
++out:
++ return ret;
++
++unreg_sa:
++ ib_sa_unregister_client(&srp_sa_client);
++ class_unregister(&srp_class);
++
++release_tr:
++ srp_release_transport(ib_srp_transport_template);
++
++destroy_wq:
++ destroy_workqueue(srp_remove_wq);
++ goto out;
+ }
+
+ static void __exit srp_cleanup_module(void)
+@@ -3345,6 +3362,7 @@ static void __exit srp_cleanup_module(void)
+ ib_sa_unregister_client(&srp_sa_client);
+ class_unregister(&srp_class);
+ srp_release_transport(ib_srp_transport_template);
++ destroy_workqueue(srp_remove_wq);
+ }
+
+ module_init(srp_init_module);
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 4aec6a29e316..710ffa1830ae 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3227,14 +3227,16 @@ free_domains:
+
+ static void cleanup_domain(struct protection_domain *domain)
+ {
+- struct iommu_dev_data *dev_data, *next;
++ struct iommu_dev_data *entry;
+ unsigned long flags;
+
+ write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+
+- list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
+- __detach_device(dev_data);
+- atomic_set(&dev_data->bind, 0);
++ while (!list_empty(&domain->dev_list)) {
++ entry = list_first_entry(&domain->dev_list,
++ struct iommu_dev_data, list);
++ __detach_device(entry);
++ atomic_set(&entry->bind, 0);
+ }
+
+ write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 51b6b77dc3e5..382c1801a8f1 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2523,22 +2523,46 @@ static bool device_has_rmrr(struct device *dev)
+ return false;
+ }
+
++/*
++ * There are a couple cases where we need to restrict the functionality of
++ * devices associated with RMRRs. The first is when evaluating a device for
++ * identity mapping because problems exist when devices are moved in and out
++ * of domains and their respective RMRR information is lost. This means that
++ * a device with associated RMRRs will never be in a "passthrough" domain.
++ * The second is use of the device through the IOMMU API. This interface
++ * expects to have full control of the IOVA space for the device. We cannot
++ * satisfy both the requirement that RMRR access is maintained and have an
++ * unencumbered IOVA space. We also have no ability to quiesce the device's
++ * use of the RMRR space or even inform the IOMMU API user of the restriction.
++ * We therefore prevent devices associated with an RMRR from participating in
++ * the IOMMU API, which eliminates them from device assignment.
++ *
++ * In both cases we assume that PCI USB devices with RMRRs have them largely
++ * for historical reasons and that the RMRR space is not actively used post
++ * boot. This exclusion may change if vendors begin to abuse it.
++ */
++static bool device_is_rmrr_locked(struct device *dev)
++{
++ if (!device_has_rmrr(dev))
++ return false;
++
++ if (dev_is_pci(dev)) {
++ struct pci_dev *pdev = to_pci_dev(dev);
++
++ if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
++ return false;
++ }
++
++ return true;
++}
++
+ static int iommu_should_identity_map(struct device *dev, int startup)
+ {
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+- /*
+- * We want to prevent any device associated with an RMRR from
+- * getting placed into the SI Domain. This is done because
+- * problems exist when devices are moved in and out of domains
+- * and their respective RMRR info is lost. We exempt USB devices
+- * from this process due to their usage of RMRRs that are known
+- * to not be needed after BIOS hand-off to OS.
+- */
+- if (device_has_rmrr(dev) &&
+- (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
++ if (device_is_rmrr_locked(dev))
+ return 0;
+
+ if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+@@ -3867,6 +3891,14 @@ static int device_notifier(struct notifier_block *nb,
+ action != BUS_NOTIFY_DEL_DEVICE)
+ return 0;
+
++ /*
++ * If the device is still attached to a device driver we can't
++ * tear down the domain yet as DMA mappings may still be in use.
++ * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
++ */
++ if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
++ return 0;
++
+ domain = find_domain(dev);
+ if (!domain)
+ return 0;
+@@ -4202,6 +4234,11 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
+ int addr_width;
+ u8 bus, devfn;
+
++ if (device_is_rmrr_locked(dev)) {
++ dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
++ return -EPERM;
++ }
++
+ /* normally dev is not mapped */
+ if (unlikely(domain_context_mapped(dev))) {
+ struct dmar_domain *old_domain;
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 5f59f1e3e5b1..922791009fc5 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1386,6 +1386,14 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+ return q && !blk_queue_add_random(q);
+ }
+
++static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
++ sector_t start, sector_t len, void *data)
++{
++ struct request_queue *q = bdev_get_queue(dev->bdev);
++
++ return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
++}
++
+ static bool dm_table_all_devices_attribute(struct dm_table *t,
+ iterate_devices_callout_fn func)
+ {
+@@ -1464,6 +1472,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ if (!dm_table_supports_write_same(t))
+ q->limits.max_write_same_sectors = 0;
+
++ if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
++ queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
++ else
++ queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
++
+ dm_table_set_integrity(t);
+
+ /*
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 56e24c072b62..d7690f86fdb9 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1501,12 +1501,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ mddev->degraded++;
+ set_bit(Faulty, &rdev->flags);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+- /*
+- * if recovery is running, make sure it aborts.
+- */
+- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ } else
+ set_bit(Faulty, &rdev->flags);
++ /*
++ * if recovery is running, make sure it aborts.
++ */
++ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ printk(KERN_ALERT
+ "md/raid1:%s: Disk failure on %s, disabling device.\n"
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index cb882aae9e20..a46124ecafc7 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1684,13 +1684,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ return;
+ }
+- if (test_and_clear_bit(In_sync, &rdev->flags)) {
++ if (test_and_clear_bit(In_sync, &rdev->flags))
+ mddev->degraded++;
+- /*
+- * if recovery is running, make sure it aborts.
+- */
+- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+- }
++ /*
++ * If recovery is running, make sure it aborts.
++ */
++ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ set_bit(Blocked, &rdev->flags);
+ set_bit(Faulty, &rdev->flags);
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+@@ -2954,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ */
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
+ end_reshape(conf);
++ close_sync(conf);
+ return 0;
+ }
+
+@@ -4411,7 +4411,7 @@ read_more:
+ read_bio->bi_private = r10_bio;
+ read_bio->bi_end_io = end_sync_read;
+ read_bio->bi_rw = READ;
+- read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
++ read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
+ read_bio->bi_flags |= 1 << BIO_UPTODATE;
+ read_bio->bi_vcnt = 0;
+ read_bio->bi_iter.bi_size = 0;
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 6234b2e84587..183588b11fc1 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2922,7 +2922,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
+ (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
+ !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
+ (sh->raid_conf->level == 6 && s->failed && s->to_write &&
+- s->to_write < sh->raid_conf->raid_disks - 2 &&
++ s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 &&
+ (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
+ /* we would like to get this block, possibly by computing it,
+ * otherwise read it if the backing disk is insync
+@@ -3817,6 +3817,8 @@ static void handle_stripe(struct stripe_head *sh)
+ set_bit(R5_Wantwrite, &dev->flags);
+ if (prexor)
+ continue;
++ if (s.failed > 1)
++ continue;
+ if (!test_bit(R5_Insync, &dev->flags) ||
+ ((i == sh->pd_idx || i == sh->qd_idx) &&
+ s.failed == 0))
+diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
+index f953d33ee151..4bfbd5f463d1 100644
+--- a/drivers/media/common/siano/Kconfig
++++ b/drivers/media/common/siano/Kconfig
+@@ -22,8 +22,7 @@ config SMS_SIANO_DEBUGFS
+ bool "Enable debugfs for smsdvb"
+ depends on SMS_SIANO_MDTV
+ depends on DEBUG_FS
+- depends on SMS_USB_DRV
+- depends on CONFIG_SMS_USB_DRV = CONFIG_SMS_SDIO_DRV
++ depends on SMS_USB_DRV = SMS_SDIO_DRV
+
+ ---help---
+ Choose Y to enable visualizing a dump of the frontend
+diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
+index 40172b8d8ea2..f04d0bbd9cfd 100644
+--- a/drivers/media/i2c/mt9v032.c
++++ b/drivers/media/i2c/mt9v032.c
+@@ -305,8 +305,8 @@ mt9v032_update_hblank(struct mt9v032 *mt9v032)
+
+ if (mt9v032->version->version == MT9V034_CHIP_ID_REV1)
+ min_hblank += (mt9v032->hratio - 1) * 10;
+- min_hblank = max_t(unsigned int, (int)mt9v032->model->data->min_row_time - crop->width,
+- (int)min_hblank);
++ min_hblank = max_t(int, mt9v032->model->data->min_row_time - crop->width,
++ min_hblank);
+ hblank = max_t(unsigned int, mt9v032->hblank, min_hblank);
+
+ return mt9v032_write(client, MT9V032_HORIZONTAL_BLANKING, hblank);
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+index 88b97c9e64ac..73a432934bd8 100644
+--- a/drivers/media/media-device.c
++++ b/drivers/media/media-device.c
+@@ -106,8 +106,6 @@ static long media_device_enum_entities(struct media_device *mdev,
+ if (ent->name) {
+ strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
+ u_ent.name[sizeof(u_ent.name) - 1] = '\0';
+- } else {
+- memset(u_ent.name, 0, sizeof(u_ent.name));
+ }
+ u_ent.type = ent->type;
+ u_ent.revision = ent->revision;
+diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
+index 8a1253e51f04..677e3aa04eee 100644
+--- a/drivers/media/platform/vsp1/vsp1_video.c
++++ b/drivers/media/platform/vsp1/vsp1_video.c
+@@ -654,8 +654,6 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
+ if (vb->num_planes < format->num_planes)
+ return -EINVAL;
+
+- buf->video = video;
+-
+ for (i = 0; i < vb->num_planes; ++i) {
+ buf->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ buf->length[i] = vb2_plane_size(vb, i);
+diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
+index c04d48fa2999..7284320d5433 100644
+--- a/drivers/media/platform/vsp1/vsp1_video.h
++++ b/drivers/media/platform/vsp1/vsp1_video.h
+@@ -90,7 +90,6 @@ static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
+ }
+
+ struct vsp1_video_buffer {
+- struct vsp1_video *video;
+ struct vb2_buffer buf;
+ struct list_head queue;
+
+diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
+index 2018befabb5a..e71decbfd0af 100644
+--- a/drivers/media/tuners/xc4000.c
++++ b/drivers/media/tuners/xc4000.c
+@@ -93,7 +93,7 @@ struct xc4000_priv {
+ struct firmware_description *firm;
+ int firm_size;
+ u32 if_khz;
+- u32 freq_hz;
++ u32 freq_hz, freq_offset;
+ u32 bandwidth;
+ u8 video_standard;
+ u8 rf_mode;
+@@ -1157,14 +1157,14 @@ static int xc4000_set_params(struct dvb_frontend *fe)
+ case SYS_ATSC:
+ dprintk(1, "%s() VSB modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_AIR;
+- priv->freq_hz = c->frequency - 1750000;
++ priv->freq_offset = 1750000;
+ priv->video_standard = XC4000_DTV6;
+ type = DTV6;
+ break;
+ case SYS_DVBC_ANNEX_B:
+ dprintk(1, "%s() QAM modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_CABLE;
+- priv->freq_hz = c->frequency - 1750000;
++ priv->freq_offset = 1750000;
+ priv->video_standard = XC4000_DTV6;
+ type = DTV6;
+ break;
+@@ -1173,23 +1173,23 @@ static int xc4000_set_params(struct dvb_frontend *fe)
+ dprintk(1, "%s() OFDM\n", __func__);
+ if (bw == 0) {
+ if (c->frequency < 400000000) {
+- priv->freq_hz = c->frequency - 2250000;
++ priv->freq_offset = 2250000;
+ } else {
+- priv->freq_hz = c->frequency - 2750000;
++ priv->freq_offset = 2750000;
+ }
+ priv->video_standard = XC4000_DTV7_8;
+ type = DTV78;
+ } else if (bw <= 6000000) {
+ priv->video_standard = XC4000_DTV6;
+- priv->freq_hz = c->frequency - 1750000;
++ priv->freq_offset = 1750000;
+ type = DTV6;
+ } else if (bw <= 7000000) {
+ priv->video_standard = XC4000_DTV7;
+- priv->freq_hz = c->frequency - 2250000;
++ priv->freq_offset = 2250000;
+ type = DTV7;
+ } else {
+ priv->video_standard = XC4000_DTV8;
+- priv->freq_hz = c->frequency - 2750000;
++ priv->freq_offset = 2750000;
+ type = DTV8;
+ }
+ priv->rf_mode = XC_RF_MODE_AIR;
+@@ -1200,6 +1200,8 @@ static int xc4000_set_params(struct dvb_frontend *fe)
+ goto fail;
+ }
+
++ priv->freq_hz = c->frequency - priv->freq_offset;
++
+ dprintk(1, "%s() frequency=%d (compensated)\n",
+ __func__, priv->freq_hz);
+
+@@ -1520,7 +1522,7 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ {
+ struct xc4000_priv *priv = fe->tuner_priv;
+
+- *freq = priv->freq_hz;
++ *freq = priv->freq_hz + priv->freq_offset;
+
+ if (debug) {
+ mutex_lock(&priv->lock);
+diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
+index 2b3d514be672..3091cf7be7a1 100644
+--- a/drivers/media/tuners/xc5000.c
++++ b/drivers/media/tuners/xc5000.c
+@@ -56,7 +56,7 @@ struct xc5000_priv {
+
+ u32 if_khz;
+ u16 xtal_khz;
+- u32 freq_hz;
++ u32 freq_hz, freq_offset;
+ u32 bandwidth;
+ u8 video_standard;
+ u8 rf_mode;
+@@ -749,13 +749,13 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ case SYS_ATSC:
+ dprintk(1, "%s() VSB modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_AIR;
+- priv->freq_hz = freq - 1750000;
++ priv->freq_offset = 1750000;
+ priv->video_standard = DTV6;
+ break;
+ case SYS_DVBC_ANNEX_B:
+ dprintk(1, "%s() QAM modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_CABLE;
+- priv->freq_hz = freq - 1750000;
++ priv->freq_offset = 1750000;
+ priv->video_standard = DTV6;
+ break;
+ case SYS_ISDBT:
+@@ -770,15 +770,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ switch (bw) {
+ case 6000000:
+ priv->video_standard = DTV6;
+- priv->freq_hz = freq - 1750000;
++ priv->freq_offset = 1750000;
+ break;
+ case 7000000:
+ priv->video_standard = DTV7;
+- priv->freq_hz = freq - 2250000;
++ priv->freq_offset = 2250000;
+ break;
+ case 8000000:
+ priv->video_standard = DTV8;
+- priv->freq_hz = freq - 2750000;
++ priv->freq_offset = 2750000;
+ break;
+ default:
+ printk(KERN_ERR "xc5000 bandwidth not set!\n");
+@@ -792,15 +792,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ priv->rf_mode = XC_RF_MODE_CABLE;
+ if (bw <= 6000000) {
+ priv->video_standard = DTV6;
+- priv->freq_hz = freq - 1750000;
++ priv->freq_offset = 1750000;
+ b = 6;
+ } else if (bw <= 7000000) {
+ priv->video_standard = DTV7;
+- priv->freq_hz = freq - 2250000;
++ priv->freq_offset = 2250000;
+ b = 7;
+ } else {
+ priv->video_standard = DTV7_8;
+- priv->freq_hz = freq - 2750000;
++ priv->freq_offset = 2750000;
+ b = 8;
+ }
+ dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__,
+@@ -811,6 +811,8 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ return -EINVAL;
+ }
+
++ priv->freq_hz = freq - priv->freq_offset;
++
+ dprintk(1, "%s() frequency=%d (compensated to %d)\n",
+ __func__, freq, priv->freq_hz);
+
+@@ -1061,7 +1063,7 @@ static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ {
+ struct xc5000_priv *priv = fe->tuner_priv;
+ dprintk(1, "%s()\n", __func__);
+- *freq = priv->freq_hz;
++ *freq = priv->freq_hz + priv->freq_offset;
+ return 0;
+ }
+
+diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
+index 9038194513c5..49124b76e4cf 100644
+--- a/drivers/media/usb/au0828/au0828-video.c
++++ b/drivers/media/usb/au0828/au0828-video.c
+@@ -787,11 +787,27 @@ static int au0828_i2s_init(struct au0828_dev *dev)
+
+ /*
+ * Auvitek au0828 analog stream enable
+- * Please set interface0 to AS5 before enable the stream
+ */
+ static int au0828_analog_stream_enable(struct au0828_dev *d)
+ {
++ struct usb_interface *iface;
++ int ret;
++
+ dprintk(1, "au0828_analog_stream_enable called\n");
++
++ iface = usb_ifnum_to_if(d->usbdev, 0);
++ if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
++ dprintk(1, "Changing intf#0 to alt 5\n");
++ /* set au0828 interface0 to AS5 here again */
++ ret = usb_set_interface(d->usbdev, 0, 5);
++ if (ret < 0) {
++ printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
++ return -EBUSY;
++ }
++ }
++
++ /* FIXME: size should be calculated using d->width, d->height */
++
+ au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
+ au0828_writereg(d, 0x106, 0x00);
+ /* set x position */
+@@ -1002,15 +1018,6 @@ static int au0828_v4l2_open(struct file *filp)
+ return -ERESTARTSYS;
+ }
+ if (dev->users == 0) {
+- /* set au0828 interface0 to AS5 here again */
+- ret = usb_set_interface(dev->usbdev, 0, 5);
+- if (ret < 0) {
+- mutex_unlock(&dev->lock);
+- printk(KERN_INFO "Au0828 can't set alternate to 5!\n");
+- kfree(fh);
+- return -EBUSY;
+- }
+-
+ au0828_analog_stream_enable(dev);
+ au0828_analog_stream_reset(dev);
+
+@@ -1252,13 +1259,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
+ }
+ }
+
+- /* set au0828 interface0 to AS5 here again */
+- ret = usb_set_interface(dev->usbdev, 0, 5);
+- if (ret < 0) {
+- printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
+- return -EBUSY;
+- }
+-
+ au0828_analog_stream_enable(dev);
+
+ return 0;
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index 7c4489c42365..1d67e95311d6 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -1750,12 +1750,14 @@ static int vb2_start_streaming(struct vb2_queue *q)
+ __enqueue_in_driver(vb);
+
+ /* Tell the driver to start streaming */
++ q->start_streaming_called = 1;
+ ret = call_qop(q, start_streaming, q,
+ atomic_read(&q->owned_by_drv_count));
+- q->start_streaming_called = ret == 0;
+ if (!ret)
+ return 0;
+
++ q->start_streaming_called = 0;
++
+ dprintk(1, "driver refused to start streaming\n");
+ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+ unsigned i;
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index b48d80c367f9..33a9234b701c 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
+
+ for (i = 0; i < omap->nports; i++) {
+ if (is_ehci_phy_mode(pdata->port_mode[i])) {
+- reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
++ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ break;
+ }
+ }
+diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
+index 6352bec8419a..71f387ce8cbd 100644
+--- a/drivers/mfd/rtsx_usb.c
++++ b/drivers/mfd/rtsx_usb.c
+@@ -744,6 +744,7 @@ static struct usb_device_id rtsx_usb_usb_ids[] = {
+ { USB_DEVICE(0x0BDA, 0x0140) },
+ { }
+ };
++MODULE_DEVICE_TABLE(usb, rtsx_usb_usb_ids);
+
+ static struct usb_driver rtsx_usb_driver = {
+ .name = "rtsx_usb",
+diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
+index 3bc969a5916b..4d3ff3771491 100644
+--- a/drivers/mfd/twl4030-power.c
++++ b/drivers/mfd/twl4030-power.c
+@@ -724,24 +724,24 @@ static struct twl4030_script *omap3_idle_scripts[] = {
+ * above.
+ */
+ static struct twl4030_resconfig omap3_idle_rconfig[] = {
+- TWL_REMAP_SLEEP(RES_VAUX1, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VAUX2, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VAUX3, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VAUX4, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VMMC1, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VMMC2, DEV_GRP_NULL, 0, 0),
++ TWL_REMAP_SLEEP(RES_VAUX1, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VAUX2, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VAUX3, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VAUX4, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VMMC1, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VMMC2, TWL4030_RESCONFIG_UNDEF, 0, 0),
+ TWL_REMAP_OFF(RES_VPLL1, DEV_GRP_P1, 3, 1),
+ TWL_REMAP_SLEEP(RES_VPLL2, DEV_GRP_P1, 0, 0),
+- TWL_REMAP_SLEEP(RES_VSIM, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VDAC, DEV_GRP_NULL, 0, 0),
++ TWL_REMAP_SLEEP(RES_VSIM, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VDAC, TWL4030_RESCONFIG_UNDEF, 0, 0),
+ TWL_REMAP_SLEEP(RES_VINTANA1, TWL_DEV_GRP_P123, 1, 2),
+ TWL_REMAP_SLEEP(RES_VINTANA2, TWL_DEV_GRP_P123, 0, 2),
+ TWL_REMAP_SLEEP(RES_VINTDIG, TWL_DEV_GRP_P123, 1, 2),
+ TWL_REMAP_SLEEP(RES_VIO, TWL_DEV_GRP_P123, 2, 2),
+ TWL_REMAP_OFF(RES_VDD1, DEV_GRP_P1, 4, 1),
+ TWL_REMAP_OFF(RES_VDD2, DEV_GRP_P1, 3, 1),
+- TWL_REMAP_SLEEP(RES_VUSB_1V5, DEV_GRP_NULL, 0, 0),
+- TWL_REMAP_SLEEP(RES_VUSB_1V8, DEV_GRP_NULL, 0, 0),
++ TWL_REMAP_SLEEP(RES_VUSB_1V5, TWL4030_RESCONFIG_UNDEF, 0, 0),
++ TWL_REMAP_SLEEP(RES_VUSB_1V8, TWL4030_RESCONFIG_UNDEF, 0, 0),
+ TWL_REMAP_SLEEP(RES_VUSB_3V1, TWL_DEV_GRP_P123, 0, 0),
+ /* Resource #20 USB charge pump skipped */
+ TWL_REMAP_SLEEP(RES_REGEN, TWL_DEV_GRP_P123, 2, 1),
+diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
+index 19d637266fcd..71e4f6ccae2f 100644
+--- a/drivers/mtd/ftl.c
++++ b/drivers/mtd/ftl.c
+@@ -1075,7 +1075,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+ return;
+ }
+
+- ftl_freepart(partition);
+ kfree(partition);
+ }
+
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index f0ed92e210a1..e2b9b345177a 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -931,7 +931,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u32 val;
+
+ val = readl(info->reg.gpmc_ecc_config);
+- if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
++ if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
+ return -EINVAL;
+
+ /* read ecc result */
+diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
+index 79a37f6d3307..e384844a1ae1 100644
+--- a/drivers/power/bq2415x_charger.c
++++ b/drivers/power/bq2415x_charger.c
+@@ -840,8 +840,7 @@ static int bq2415x_notifier_call(struct notifier_block *nb,
+ if (bq->automode < 1)
+ return NOTIFY_OK;
+
+- sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
+- bq2415x_set_mode(bq, bq->reported_mode);
++ schedule_delayed_work(&bq->work, 0);
+
+ return NOTIFY_OK;
+ }
+@@ -892,6 +891,11 @@ static void bq2415x_timer_work(struct work_struct *work)
+ int error;
+ int boost;
+
++ if (bq->automode > 0 && (bq->reported_mode != bq->mode)) {
++ sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
++ bq2415x_set_mode(bq, bq->reported_mode);
++ }
++
+ if (!bq->autotimer)
+ return;
+
+diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
+index 04f262a836b2..4c9db589f6c1 100644
+--- a/drivers/regulator/arizona-ldo1.c
++++ b/drivers/regulator/arizona-ldo1.c
+@@ -143,8 +143,6 @@ static struct regulator_ops arizona_ldo1_ops = {
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+- .get_bypass = regulator_get_bypass_regmap,
+- .set_bypass = regulator_set_bypass_regmap,
+ };
+
+ static const struct regulator_desc arizona_ldo1 = {
+diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
+index 9effe48c605e..8b7a0a9ebdfe 100644
+--- a/drivers/regulator/tps65218-regulator.c
++++ b/drivers/regulator/tps65218-regulator.c
+@@ -68,7 +68,7 @@ static const struct regulator_linear_range ldo1_dcdc3_ranges[] = {
+
+ static const struct regulator_linear_range dcdc4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1175000, 0x0, 0xf, 25000),
+- REGULATOR_LINEAR_RANGE(1550000, 0x10, 0x34, 50000),
++ REGULATOR_LINEAR_RANGE(1600000, 0x10, 0x34, 50000),
+ };
+
+ static struct tps_info tps65218_pmic_regs[] = {
+diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
+index 2e28392c2fb6..a38aafa030b3 100644
+--- a/drivers/scsi/bfa/bfa_ioc.h
++++ b/drivers/scsi/bfa/bfa_ioc.h
+@@ -72,7 +72,7 @@ struct bfa_sge_s {
+ } while (0)
+
+ #define bfa_swap_words(_x) ( \
+- ((_x) << 32) | ((_x) >> 32))
++ ((u64)(_x) << 32) | ((u64)(_x) >> 32))
+
+ #ifdef __BIG_ENDIAN
+ #define bfa_sge_to_be(_x)
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 88d46fe6bf98..769be4d50037 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -368,8 +368,8 @@ scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
+ if (!pool)
+ return NULL;
+
+- pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name);
+- pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name);
++ pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name);
++ pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name);
+ if (!pool->cmd_name || !pool->sense_name) {
+ scsi_free_host_cmd_pool(pool);
+ return NULL;
+@@ -380,6 +380,10 @@ scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
+ pool->slab_flags |= SLAB_CACHE_DMA;
+ pool->gfp_mask = __GFP_DMA;
+ }
++
++ if (hostt->cmd_size)
++ hostt->cmd_pool = pool;
++
+ return pool;
+ }
+
+@@ -424,8 +428,10 @@ out:
+ out_free_slab:
+ kmem_cache_destroy(pool->cmd_slab);
+ out_free_pool:
+- if (hostt->cmd_size)
++ if (hostt->cmd_size) {
+ scsi_free_host_cmd_pool(pool);
++ hostt->cmd_pool = NULL;
++ }
+ goto out;
+ }
+
+@@ -447,8 +453,10 @@ static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
+ if (!--pool->users) {
+ kmem_cache_destroy(pool->cmd_slab);
+ kmem_cache_destroy(pool->sense_slab);
+- if (hostt->cmd_size)
++ if (hostt->cmd_size) {
+ scsi_free_host_cmd_pool(pool);
++ hostt->cmd_pool = NULL;
++ }
+ }
+ mutex_unlock(&host_cmd_pool_mutex);
+ }
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index f969aca0b54e..49014a143c6a 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -222,6 +222,7 @@ static struct {
+ {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
++ {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
+ {"Promise", "", NULL, BLIST_SPARSELUN},
+ {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
+ {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index e02b3aab56ce..a299b82e6b09 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -922,6 +922,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ if (*bflags & BLIST_USE_10_BYTE_MS)
+ sdev->use_10_for_ms = 1;
+
++ /* some devices don't like REPORT SUPPORTED OPERATION CODES
++ * and will simply timeout causing sd_mod init to take a very
++ * very long time */
++ if (*bflags & BLIST_NO_RSOC)
++ sdev->no_report_opcodes = 1;
++
+ /* set the device running here so that slave configure
+ * may do I/O */
+ ret = scsi_device_set_state(sdev, SDEV_RUNNING);
+@@ -950,7 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+
+ sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
+
+- if (*bflags & BLIST_SKIP_VPD_PAGES)
++ if (*bflags & BLIST_TRY_VPD_PAGES)
++ sdev->try_vpd_pages = 1;
++ else if (*bflags & BLIST_SKIP_VPD_PAGES)
+ sdev->skip_vpd_pages = 1;
+
+ transport_configure_device(&sdev->sdev_gendev);
+@@ -1239,6 +1247,12 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
+ max_dev_lun = min(8U, max_dev_lun);
+
+ /*
++ * Stop scanning at 255 unless BLIST_SCSI3LUN
++ */
++ if (!(bflags & BLIST_SCSI3LUN))
++ max_dev_lun = min(256U, max_dev_lun);
++
++ /*
+ * We have already scanned LUN 0, so start at LUN 1. Keep scanning
+ * until we reach the max, or no LUN is found and we are not
+ * sparse_lun.
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index 13e898332e45..a0c5bfdc5366 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -473,7 +473,8 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport)
+ if (delay > 0)
+ queue_delayed_work(system_long_wq, &rport->reconnect_work,
+ 1UL * delay * HZ);
+- if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
++ if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) &&
++ srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
+ pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
+ rport->state);
+ scsi_target_block(&shost->shost_gendev);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6825eda1114a..ed2e99eca336 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2681,6 +2681,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+
+ static int sd_try_extended_inquiry(struct scsi_device *sdp)
+ {
++ /* Attempt VPD inquiry if the device blacklist explicitly calls
++ * for it.
++ */
++ if (sdp->try_vpd_pages)
++ return 1;
+ /*
+ * Although VPD inquiries can go to SCSI-2 type devices,
+ * some USB ones crash on receiving them, and the pages
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 9969fa1ef7c4..ed0f899e8aa5 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -33,6 +33,7 @@
+ #include <linux/device.h>
+ #include <linux/hyperv.h>
+ #include <linux/mempool.h>
++#include <linux/blkdev.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_host.h>
+@@ -330,17 +331,17 @@ static int storvsc_timeout = 180;
+
+ static void storvsc_on_channel_callback(void *context);
+
+-/*
+- * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
+- * reality, the path/target is not used (ie always set to 0) so our
+- * scsi host adapter essentially has 1 bus with 1 target that contains
+- * up to 256 luns.
+- */
+-#define STORVSC_MAX_LUNS_PER_TARGET 64
+-#define STORVSC_MAX_TARGETS 1
+-#define STORVSC_MAX_CHANNELS 1
++#define STORVSC_MAX_LUNS_PER_TARGET 255
++#define STORVSC_MAX_TARGETS 2
++#define STORVSC_MAX_CHANNELS 8
+
++#define STORVSC_FC_MAX_LUNS_PER_TARGET 255
++#define STORVSC_FC_MAX_TARGETS 128
++#define STORVSC_FC_MAX_CHANNELS 8
+
++#define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
++#define STORVSC_IDE_MAX_TARGETS 1
++#define STORVSC_IDE_MAX_CHANNELS 1
+
+ struct storvsc_cmd_request {
+ struct list_head entry;
+@@ -1017,6 +1018,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ case ATA_12:
+ set_host_byte(scmnd, DID_PASSTHROUGH);
+ break;
++ /*
++ * On Some Windows hosts TEST_UNIT_READY command can return
++ * SRB_STATUS_ERROR, let the upper level code deal with it
++ * based on the sense information.
++ */
++ case TEST_UNIT_READY:
++ break;
+ default:
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ }
+@@ -1518,6 +1526,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+ return SUCCESS;
+ }
+
++/*
++ * The host guarantees to respond to each command, although I/O latencies might
++ * be unbounded on Azure. Reset the timer unconditionally to give the host a
++ * chance to perform EH.
++ */
++static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
++{
++ return BLK_EH_RESET_TIMER;
++}
++
+ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
+ {
+ bool allowed = true;
+@@ -1553,9 +1571,19 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ struct vmscsi_request *vm_srb;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
+
+- if (!storvsc_scsi_cmd_ok(scmnd)) {
+- scmnd->scsi_done(scmnd);
+- return 0;
++ if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
++ /*
++ * On legacy hosts filter unimplemented commands.
++ * Future hosts are expected to correctly handle
++ * unsupported commands. Furthermore, it is
++ * possible that some of the currently
++ * unsupported commands maybe supported in
++ * future versions of the host.
++ */
++ if (!storvsc_scsi_cmd_ok(scmnd)) {
++ scmnd->scsi_done(scmnd);
++ return 0;
++ }
+ }
+
+ request_size = sizeof(struct storvsc_cmd_request);
+@@ -1580,26 +1608,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ vm_srb = &cmd_request->vstor_packet.vm_srb;
+ vm_srb->win8_extension.time_out_value = 60;
+
++ vm_srb->win8_extension.srb_flags |=
++ (SRB_FLAGS_QUEUE_ACTION_ENABLE |
++ SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+
+ /* Build the SRB */
+ switch (scmnd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ vm_srb->data_in = WRITE_TYPE;
+ vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
+- vm_srb->win8_extension.srb_flags |=
+- (SRB_FLAGS_QUEUE_ACTION_ENABLE |
+- SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+ break;
+ case DMA_FROM_DEVICE:
+ vm_srb->data_in = READ_TYPE;
+ vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
+- vm_srb->win8_extension.srb_flags |=
+- (SRB_FLAGS_QUEUE_ACTION_ENABLE |
+- SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+ break;
+ default:
+ vm_srb->data_in = UNKNOWN_TYPE;
+- vm_srb->win8_extension.srb_flags = 0;
++ vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
++ SRB_FLAGS_DATA_OUT);
+ break;
+ }
+
+@@ -1687,11 +1713,11 @@ static struct scsi_host_template scsi_driver = {
+ .bios_param = storvsc_get_chs,
+ .queuecommand = storvsc_queuecommand,
+ .eh_host_reset_handler = storvsc_host_reset_handler,
++ .eh_timed_out = storvsc_eh_timed_out,
+ .slave_alloc = storvsc_device_alloc,
+ .slave_destroy = storvsc_device_destroy,
+ .slave_configure = storvsc_device_configure,
+- .cmd_per_lun = 1,
+- /* 64 max_queue * 1 target */
++ .cmd_per_lun = 255,
+ .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
+ .this_id = -1,
+ /* no use setting to 0 since ll_blk_rw reset it to 1 */
+@@ -1743,19 +1769,25 @@ static int storvsc_probe(struct hv_device *device,
+ * set state to properly communicate with the host.
+ */
+
+- if (vmbus_proto_version == VERSION_WIN8) {
+- sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
+- vmscsi_size_delta = 0;
+- vmstor_current_major = VMSTOR_WIN8_MAJOR;
+- vmstor_current_minor = VMSTOR_WIN8_MINOR;
+- } else {
++ switch (vmbus_proto_version) {
++ case VERSION_WS2008:
++ case VERSION_WIN7:
+ sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
+ vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
+ vmstor_current_major = VMSTOR_WIN7_MAJOR;
+ vmstor_current_minor = VMSTOR_WIN7_MINOR;
++ break;
++ default:
++ sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
++ vmscsi_size_delta = 0;
++ vmstor_current_major = VMSTOR_WIN8_MAJOR;
++ vmstor_current_minor = VMSTOR_WIN8_MINOR;
++ break;
+ }
+
+-
++ if (dev_id->driver_data == SFC_GUID)
++ scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
++ STORVSC_FC_MAX_TARGETS);
+ host = scsi_host_alloc(&scsi_driver,
+ sizeof(struct hv_host_device));
+ if (!host)
+@@ -1789,12 +1821,25 @@ static int storvsc_probe(struct hv_device *device,
+ host_dev->path = stor_device->path_id;
+ host_dev->target = stor_device->target_id;
+
+- /* max # of devices per target */
+- host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
+- /* max # of targets per channel */
+- host->max_id = STORVSC_MAX_TARGETS;
+- /* max # of channels */
+- host->max_channel = STORVSC_MAX_CHANNELS - 1;
++ switch (dev_id->driver_data) {
++ case SFC_GUID:
++ host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
++ host->max_id = STORVSC_FC_MAX_TARGETS;
++ host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
++ break;
++
++ case SCSI_GUID:
++ host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
++ host->max_id = STORVSC_MAX_TARGETS;
++ host->max_channel = STORVSC_MAX_CHANNELS - 1;
++ break;
++
++ default:
++ host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
++ host->max_id = STORVSC_IDE_MAX_TARGETS;
++ host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
++ break;
++ }
+ /* max cmd length */
+ host->max_cmd_len = STORVSC_MAX_CMD_LEN;
+
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 4dc77df38864..68441fa448de 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -149,6 +149,7 @@ struct omap2_mcspi_cs {
+ void __iomem *base;
+ unsigned long phys;
+ int word_len;
++ u16 mode;
+ struct list_head node;
+ /* Context save and restore shadow register */
+ u32 chconf0, chctrl0;
+@@ -926,6 +927,8 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+
+ mcspi_write_chconf0(spi, l);
+
++ cs->mode = spi->mode;
++
+ dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
+ speed_hz,
+ (spi->mode & SPI_CPHA) ? "trailing" : "leading",
+@@ -998,6 +1001,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
+ return -ENOMEM;
+ cs->base = mcspi->base + spi->chip_select * 0x14;
+ cs->phys = mcspi->phys + spi->chip_select * 0x14;
++ cs->mode = 0;
+ cs->chconf0 = 0;
+ cs->chctrl0 = 0;
+ spi->controller_state = cs;
+@@ -1079,6 +1083,16 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
+ cs = spi->controller_state;
+ cd = spi->controller_data;
+
++ /*
++ * The slave driver could have changed spi->mode in which case
++ * it will be different from cs->mode (the current hardware setup).
++ * If so, set par_override (even though its not a parity issue) so
++ * omap2_mcspi_setup_transfer will be called to configure the hardware
++ * with the correct mode on the first iteration of the loop below.
++ */
++ if (spi->mode != cs->mode)
++ par_override = 1;
++
+ omap2_mcspi_set_enable(spi, 0);
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
+index d018a4aac3a1..c206a4ad83cd 100644
+--- a/drivers/spi/spi-orion.c
++++ b/drivers/spi/spi-orion.c
+@@ -346,8 +346,6 @@ static int orion_spi_probe(struct platform_device *pdev)
+ struct resource *r;
+ unsigned long tclk_hz;
+ int status = 0;
+- const u32 *iprop;
+- int size;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi));
+ if (master == NULL) {
+@@ -358,10 +356,10 @@ static int orion_spi_probe(struct platform_device *pdev)
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+ if (pdev->dev.of_node) {
+- iprop = of_get_property(pdev->dev.of_node, "cell-index",
+- &size);
+- if (iprop && size == sizeof(*iprop))
+- master->bus_num = *iprop;
++ u32 cell_index;
++ if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
++ &cell_index))
++ master->bus_num = cell_index;
+ }
+
+ /* we support only mode 0, and no options */
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index fe792106bdc5..46f45ca2c694 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1074,6 +1074,7 @@ static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ { "INT3430", 0 },
+ { "INT3431", 0 },
+ { "80860F0E", 0 },
++ { "8086228E", 0 },
+ { },
+ };
+ MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
+index 500713882ad5..48dcb2e97b90 100644
+--- a/drivers/xen/events/events_fifo.c
++++ b/drivers/xen/events/events_fifo.c
+@@ -99,6 +99,25 @@ static unsigned evtchn_fifo_nr_channels(void)
+ return event_array_pages * EVENT_WORDS_PER_PAGE;
+ }
+
++static int init_control_block(int cpu,
++ struct evtchn_fifo_control_block *control_block)
++{
++ struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
++ struct evtchn_init_control init_control;
++ unsigned int i;
++
++ /* Reset the control block and the local HEADs. */
++ clear_page(control_block);
++ for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
++ q->head[i] = 0;
++
++ init_control.control_gfn = virt_to_mfn(control_block);
++ init_control.offset = 0;
++ init_control.vcpu = cpu;
++
++ return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
++}
++
+ static void free_unused_array_pages(void)
+ {
+ unsigned i;
+@@ -323,7 +342,6 @@ static void evtchn_fifo_resume(void)
+
+ for_each_possible_cpu(cpu) {
+ void *control_block = per_cpu(cpu_control_block, cpu);
+- struct evtchn_init_control init_control;
+ int ret;
+
+ if (!control_block)
+@@ -340,12 +358,7 @@ static void evtchn_fifo_resume(void)
+ continue;
+ }
+
+- init_control.control_gfn = virt_to_mfn(control_block);
+- init_control.offset = 0;
+- init_control.vcpu = cpu;
+-
+- ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control,
+- &init_control);
++ ret = init_control_block(cpu, control_block);
+ if (ret < 0)
+ BUG();
+ }
+@@ -373,30 +386,25 @@ static const struct evtchn_ops evtchn_ops_fifo = {
+ .resume = evtchn_fifo_resume,
+ };
+
+-static int evtchn_fifo_init_control_block(unsigned cpu)
++static int evtchn_fifo_alloc_control_block(unsigned cpu)
+ {
+- struct page *control_block = NULL;
+- struct evtchn_init_control init_control;
++ void *control_block = NULL;
+ int ret = -ENOMEM;
+
+- control_block = alloc_page(GFP_KERNEL|__GFP_ZERO);
++ control_block = (void *)__get_free_page(GFP_KERNEL);
+ if (control_block == NULL)
+ goto error;
+
+- init_control.control_gfn = virt_to_mfn(page_address(control_block));
+- init_control.offset = 0;
+- init_control.vcpu = cpu;
+-
+- ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
++ ret = init_control_block(cpu, control_block);
+ if (ret < 0)
+ goto error;
+
+- per_cpu(cpu_control_block, cpu) = page_address(control_block);
++ per_cpu(cpu_control_block, cpu) = control_block;
+
+ return 0;
+
+ error:
+- __free_page(control_block);
++ free_page((unsigned long)control_block);
+ return ret;
+ }
+
+@@ -410,7 +418,7 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (!per_cpu(cpu_control_block, cpu))
+- ret = evtchn_fifo_init_control_block(cpu);
++ ret = evtchn_fifo_alloc_control_block(cpu);
+ break;
+ default:
+ break;
+@@ -427,7 +435,7 @@ int __init xen_evtchn_fifo_init(void)
+ int cpu = get_cpu();
+ int ret;
+
+- ret = evtchn_fifo_init_control_block(cpu);
++ ret = evtchn_fifo_alloc_control_block(cpu);
+ if (ret < 0)
+ goto out;
+
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index de6aed8c78e5..c97fd86cfb1b 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -70,11 +70,6 @@
+ #define SERVER_NAME_LENGTH 40
+ #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
+
+-/* used to define string lengths for reversing unicode strings */
+-/* (256+1)*2 = 514 */
+-/* (max path length + 1 for null) * 2 for unicode */
+-#define MAX_NAME 514
+-
+ /* SMB echo "timeout" -- FIXME: tunable? */
+ #define SMB_ECHO_INTERVAL (60 * HZ)
+
+@@ -404,6 +399,8 @@ struct smb_version_operations {
+ const struct cifs_fid *, u32 *);
+ int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
+ int);
++ /* check if we need to issue closedir */
++ bool (*dir_needs_close)(struct cifsFileInfo *);
+ };
+
+ struct smb_version_values {
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index e90a1e9aa627..9de08c9dd106 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -762,7 +762,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
+
+ cifs_dbg(FYI, "Freeing private data in close dir\n");
+ spin_lock(&cifs_file_list_lock);
+- if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
++ if (server->ops->dir_needs_close(cfile)) {
+ cfile->invalidHandle = true;
+ spin_unlock(&cifs_file_list_lock);
+ if (server->ops->close_dir)
+@@ -2823,7 +2823,7 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
+ total_read += result;
+ }
+
+- return total_read > 0 ? total_read : result;
++ return total_read > 0 && result != -EAGAIN ? total_read : result;
+ }
+
+ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
+@@ -3231,7 +3231,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
+ total_read += result;
+ }
+
+- return total_read > 0 ? total_read : result;
++ return total_read > 0 && result != -EAGAIN ? total_read : result;
+ }
+
+ static int cifs_readpages(struct file *file, struct address_space *mapping,
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index a174605f6afa..d322e7d4e123 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -1710,13 +1710,22 @@ cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
+ unlink_target:
+ /* Try unlinking the target dentry if it's not negative */
+ if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
+- tmprc = cifs_unlink(target_dir, target_dentry);
++ if (d_is_dir(target_dentry))
++ tmprc = cifs_rmdir(target_dir, target_dentry);
++ else
++ tmprc = cifs_unlink(target_dir, target_dentry);
+ if (tmprc)
+ goto cifs_rename_exit;
+ rc = cifs_do_rename(xid, source_dentry, from_name,
+ target_dentry, to_name);
+ }
+
++ /* force revalidate to go get info when needed */
++ CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
++
++ source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
++ target_dir->i_mtime = current_fs_time(source_dir->i_sb);
++
+ cifs_rename_exit:
+ kfree(info_buf_source);
+ kfree(from_name);
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index b15862e0f68c..b334a89d6a66 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -593,11 +593,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+ /* close and restart search */
+ cifs_dbg(FYI, "search backing up - close and restart search\n");
+ spin_lock(&cifs_file_list_lock);
+- if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
++ if (server->ops->dir_needs_close(cfile)) {
+ cfile->invalidHandle = true;
+ spin_unlock(&cifs_file_list_lock);
+- if (server->ops->close)
+- server->ops->close(xid, tcon, &cfile->fid);
++ if (server->ops->close_dir)
++ server->ops->close_dir(xid, tcon, &cfile->fid);
+ } else
+ spin_unlock(&cifs_file_list_lock);
+ if (cfile->srch_inf.ntwrk_buf_start) {
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index d1fdfa848703..84ca0a4caaeb 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -1009,6 +1009,12 @@ cifs_is_read_op(__u32 oplock)
+ return oplock == OPLOCK_READ;
+ }
+
++static bool
++cifs_dir_needs_close(struct cifsFileInfo *cfile)
++{
++ return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
++}
++
+ struct smb_version_operations smb1_operations = {
+ .send_cancel = send_nt_cancel,
+ .compare_fids = cifs_compare_fids,
+@@ -1078,6 +1084,7 @@ struct smb_version_operations smb1_operations = {
+ .query_mf_symlink = cifs_query_mf_symlink,
+ .create_mf_symlink = cifs_create_mf_symlink,
+ .is_read_op = cifs_is_read_op,
++ .dir_needs_close = cifs_dir_needs_close,
+ #ifdef CONFIG_CIFS_XATTR
+ .query_all_EAs = CIFSSMBQAllEAs,
+ .set_EA = CIFSSMBSetEA,
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 3f17b4550831..45992944e238 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
+ goto out;
+ }
+
+- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ GFP_KERNEL);
+ if (smb2_data == NULL) {
+ rc = -ENOMEM;
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 84c012a6aba0..215f8d3e3e53 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ *adjust_tz = false;
+ *symlink = false;
+
+- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ GFP_KERNEL);
+ if (smb2_data == NULL)
+ return -ENOMEM;
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+index 94bd4fbb13d3..a689514e260f 100644
+--- a/fs/cifs/smb2maperror.c
++++ b/fs/cifs/smb2maperror.c
+@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
+ {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
+ {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"},
+- {STATUS_NO_MORE_FILES, -EIO, "STATUS_NO_MORE_FILES"},
++ {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
+ {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
+ {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
+ {STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"},
+@@ -605,7 +605,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ {STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
+ {STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
+ {STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
+- {STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"},
++ {STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
+ {STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
+ {STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
+ {STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 787844bde384..f325c59e12e6 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -339,7 +339,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
+ int rc;
+ struct smb2_file_all_info *smb2_data;
+
+- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ GFP_KERNEL);
+ if (smb2_data == NULL)
+ return -ENOMEM;
+@@ -1104,6 +1104,12 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch)
+ return le32_to_cpu(lc->lcontext.LeaseState);
+ }
+
++static bool
++smb2_dir_needs_close(struct cifsFileInfo *cfile)
++{
++ return !cfile->invalidHandle;
++}
++
+ struct smb_version_operations smb20_operations = {
+ .compare_fids = smb2_compare_fids,
+ .setup_request = smb2_setup_request,
+@@ -1177,6 +1183,7 @@ struct smb_version_operations smb20_operations = {
+ .create_lease_buf = smb2_create_lease_buf,
+ .parse_lease_buf = smb2_parse_lease_buf,
+ .clone_range = smb2_clone_range,
++ .dir_needs_close = smb2_dir_needs_close,
+ };
+
+ struct smb_version_operations smb21_operations = {
+@@ -1252,6 +1259,7 @@ struct smb_version_operations smb21_operations = {
+ .create_lease_buf = smb2_create_lease_buf,
+ .parse_lease_buf = smb2_parse_lease_buf,
+ .clone_range = smb2_clone_range,
++ .dir_needs_close = smb2_dir_needs_close,
+ };
+
+ struct smb_version_operations smb30_operations = {
+@@ -1330,6 +1338,7 @@ struct smb_version_operations smb30_operations = {
+ .parse_lease_buf = smb3_parse_lease_buf,
+ .clone_range = smb2_clone_range,
+ .validate_negotiate = smb3_validate_negotiate,
++ .dir_needs_close = smb2_dir_needs_close,
+ };
+
+ struct smb_version_values smb20_values = {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index b0b260dbb19d..87077559a0ab 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -922,7 +922,8 @@ tcon_exit:
+ tcon_error_exit:
+ if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
+ cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
+- tcon->bad_network_name = true;
++ if (tcon)
++ tcon->bad_network_name = true;
+ }
+ goto tcon_exit;
+ }
+@@ -1545,7 +1546,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ {
+ return query_info(xid, tcon, persistent_fid, volatile_fid,
+ FILE_ALL_INFORMATION,
+- sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++ sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ sizeof(struct smb2_file_all_info), data);
+ }
+
+@@ -2141,6 +2142,10 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
+
+ if (rc) {
++ if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
++ srch_inf->endOfSearch = true;
++ rc = 0;
++ }
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+ goto qdir_exit;
+ }
+@@ -2178,11 +2183,6 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ else
+ cifs_dbg(VFS, "illegal search buffer type\n");
+
+- if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
+- srch_inf->endOfSearch = 1;
+- else
+- srch_inf->endOfSearch = 0;
+-
+ return rc;
+
+ qdir_exit:
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 06f65857a855..e1308c5423ed 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -106,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
+ unsigned int hash)
+ {
+ hash += (unsigned long) parent / L1_CACHE_BYTES;
+- hash = hash + (hash >> d_hash_shift);
+- return dentry_hashtable + (hash & d_hash_mask);
++ return dentry_hashtable + hash_32(hash, d_hash_shift);
+ }
+
+ /* Statistics gathering. */
+diff --git a/fs/namei.c b/fs/namei.c
+index 9eb787e5c167..17ca8b85c308 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -34,6 +34,7 @@
+ #include <linux/device_cgroup.h>
+ #include <linux/fs_struct.h>
+ #include <linux/posix_acl.h>
++#include <linux/hash.h>
+ #include <asm/uaccess.h>
+
+ #include "internal.h"
+@@ -1629,8 +1630,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+
+ static inline unsigned int fold_hash(unsigned long hash)
+ {
+- hash += hash >> (8*sizeof(int));
+- return hash;
++ return hash_64(hash, 32);
+ }
+
+ #else /* 32-bit case */
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 182bc41cd887..140d17705683 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -779,6 +779,20 @@ static void attach_mnt(struct mount *mnt,
+ list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+ }
+
++static void attach_shadowed(struct mount *mnt,
++ struct mount *parent,
++ struct mount *shadows)
++{
++ if (shadows) {
++ hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
++ list_add(&mnt->mnt_child, &shadows->mnt_child);
++ } else {
++ hlist_add_head_rcu(&mnt->mnt_hash,
++ m_hash(&parent->mnt, mnt->mnt_mountpoint));
++ list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
++ }
++}
++
+ /*
+ * vfsmount lock must be held for write
+ */
+@@ -797,12 +811,7 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
+
+ list_splice(&head, n->list.prev);
+
+- if (shadows)
+- hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
+- else
+- hlist_add_head_rcu(&mnt->mnt_hash,
+- m_hash(&parent->mnt, mnt->mnt_mountpoint));
+- list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
++ attach_shadowed(mnt, parent, shadows);
+ touch_mnt_namespace(n);
+ }
+
+@@ -890,8 +899,21 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
+
+ mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
+ /* Don't allow unprivileged users to change mount flags */
+- if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
+- mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
++ if (flag & CL_UNPRIVILEGED) {
++ mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
++
++ if (mnt->mnt.mnt_flags & MNT_READONLY)
++ mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
++
++ if (mnt->mnt.mnt_flags & MNT_NODEV)
++ mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
++
++ if (mnt->mnt.mnt_flags & MNT_NOSUID)
++ mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
++
++ if (mnt->mnt.mnt_flags & MNT_NOEXEC)
++ mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
++ }
+
+ /* Don't allow unprivileged users to reveal what is under a mount */
+ if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
+@@ -1213,6 +1235,11 @@ static void namespace_unlock(void)
+ head.first->pprev = &head.first;
+ INIT_HLIST_HEAD(&unmounted);
+
++ /* undo decrements we'd done in umount_tree() */
++ hlist_for_each_entry(mnt, &head, mnt_hash)
++ if (mnt->mnt_ex_mountpoint.mnt)
++ mntget(mnt->mnt_ex_mountpoint.mnt);
++
+ up_write(&namespace_sem);
+
+ synchronize_rcu();
+@@ -1249,6 +1276,9 @@ void umount_tree(struct mount *mnt, int how)
+ hlist_add_head(&p->mnt_hash, &tmp_list);
+ }
+
++ hlist_for_each_entry(p, &tmp_list, mnt_hash)
++ list_del_init(&p->mnt_child);
++
+ if (how)
+ propagate_umount(&tmp_list);
+
+@@ -1259,9 +1289,9 @@ void umount_tree(struct mount *mnt, int how)
+ p->mnt_ns = NULL;
+ if (how < 2)
+ p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
+- list_del_init(&p->mnt_child);
+ if (mnt_has_parent(p)) {
+ put_mountpoint(p->mnt_mp);
++ mnt_add_count(p->mnt_parent, -1);
+ /* move the reference to mountpoint into ->mnt_ex_mountpoint */
+ p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
+ p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
+@@ -1492,6 +1522,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
+ continue;
+
+ for (s = r; s; s = next_mnt(s, r)) {
++ struct mount *t = NULL;
+ if (!(flag & CL_COPY_UNBINDABLE) &&
+ IS_MNT_UNBINDABLE(s)) {
+ s = skip_mnt_tree(s);
+@@ -1513,7 +1544,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
+ goto out;
+ lock_mount_hash();
+ list_add_tail(&q->mnt_list, &res->mnt_list);
+- attach_mnt(q, parent, p->mnt_mp);
++ mnt_set_mountpoint(parent, p->mnt_mp, q);
++ if (!list_empty(&parent->mnt_mounts)) {
++ t = list_last_entry(&parent->mnt_mounts,
++ struct mount, mnt_child);
++ if (t->mnt_mp != p->mnt_mp)
++ t = NULL;
++ }
++ attach_shadowed(q, parent, t);
+ unlock_mount_hash();
+ }
+ }
+@@ -1896,9 +1934,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
+ if (readonly_request == __mnt_is_readonly(mnt))
+ return 0;
+
+- if (mnt->mnt_flags & MNT_LOCK_READONLY)
+- return -EPERM;
+-
+ if (readonly_request)
+ error = mnt_make_readonly(real_mount(mnt));
+ else
+@@ -1924,6 +1959,33 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
+ if (path->dentry != path->mnt->mnt_root)
+ return -EINVAL;
+
++ /* Don't allow changing of locked mnt flags.
++ *
++ * No locks need to be held here while testing the various
++ * MNT_LOCK flags because those flags can never be cleared
++ * once they are set.
++ */
++ if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
++ !(mnt_flags & MNT_READONLY)) {
++ return -EPERM;
++ }
++ if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
++ !(mnt_flags & MNT_NODEV)) {
++ return -EPERM;
++ }
++ if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
++ !(mnt_flags & MNT_NOSUID)) {
++ return -EPERM;
++ }
++ if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
++ !(mnt_flags & MNT_NOEXEC)) {
++ return -EPERM;
++ }
++ if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
++ ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
++ return -EPERM;
++ }
++
+ err = security_sb_remount(sb, data);
+ if (err)
+ return err;
+@@ -1937,7 +1999,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
+ err = do_remount_sb(sb, flags, data, 0);
+ if (!err) {
+ lock_mount_hash();
+- mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
++ mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
+ mnt->mnt.mnt_flags = mnt_flags;
+ touch_mnt_namespace(mnt->mnt_ns);
+ unlock_mount_hash();
+@@ -2122,7 +2184,7 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
+ */
+ if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
+ flags |= MS_NODEV;
+- mnt_flags |= MNT_NODEV;
++ mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
+ }
+ }
+
+@@ -2436,6 +2498,14 @@ long do_mount(const char *dev_name, const char *dir_name,
+ if (flags & MS_RDONLY)
+ mnt_flags |= MNT_READONLY;
+
++ /* The default atime for remount is preservation */
++ if ((flags & MS_REMOUNT) &&
++ ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
++ MS_STRICTATIME)) == 0)) {
++ mnt_flags &= ~MNT_ATIME_MASK;
++ mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
++ }
++
+ flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
+ MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+ MS_STRICTATIME);
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index ee9cb3795c2b..7e948ffba461 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -70,8 +70,15 @@ static int fanotify_get_response(struct fsnotify_group *group,
+ wait_event(group->fanotify_data.access_waitq, event->response ||
+ atomic_read(&group->fanotify_data.bypass_perm));
+
+- if (!event->response) /* bypass_perm set */
++ if (!event->response) { /* bypass_perm set */
++ /*
++ * Event was canceled because group is being destroyed. Remove
++ * it from group's event list because we are responsible for
++ * freeing the permission event.
++ */
++ fsnotify_remove_event(group, &event->fae.fse);
+ return 0;
++ }
+
+ /* userspace responded, convert to something usable */
+ switch (event->response) {
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 3fdc8a3e1134..2685bc9ea2c9 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -359,6 +359,11 @@ static int fanotify_release(struct inode *ignored, struct file *file)
+ #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ struct fanotify_perm_event_info *event, *next;
+
++ /*
++ * There may be still new events arriving in the notification queue
++ * but since userspace cannot use fanotify fd anymore, no event can
++ * enter or leave access_list by now.
++ */
+ spin_lock(&group->fanotify_data.access_lock);
+
+ atomic_inc(&group->fanotify_data.bypass_perm);
+@@ -373,6 +378,13 @@ static int fanotify_release(struct inode *ignored, struct file *file)
+ }
+ spin_unlock(&group->fanotify_data.access_lock);
+
++ /*
++ * Since bypass_perm is set, newly queued events will not wait for
++ * access response. Wake up the already sleeping ones now.
++ * synchronize_srcu() in fsnotify_destroy_group() will wait for all
++ * processes sleeping in fanotify_handle_event() waiting for access
++ * response and thus also for all permission events to be freed.
++ */
+ wake_up(&group->fanotify_data.access_waitq);
+ #endif
+
+diff --git a/fs/notify/notification.c b/fs/notify/notification.c
+index 1e58402171a5..25a07c70f1c9 100644
+--- a/fs/notify/notification.c
++++ b/fs/notify/notification.c
+@@ -73,7 +73,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
+ /* Overflow events are per-group and we don't want to free them */
+ if (!event || event->mask == FS_Q_OVERFLOW)
+ return;
+-
++ /* If the event is still queued, we have a problem... */
++ WARN_ON(!list_empty(&event->list));
+ group->ops->free_event(event);
+ }
+
+@@ -125,6 +126,21 @@ queue:
+ }
+
+ /*
++ * Remove @event from group's notification queue. It is the responsibility of
++ * the caller to destroy the event.
++ */
++void fsnotify_remove_event(struct fsnotify_group *group,
++ struct fsnotify_event *event)
++{
++ mutex_lock(&group->notification_mutex);
++ if (!list_empty(&event->list)) {
++ list_del_init(&event->list);
++ group->q_len--;
++ }
++ mutex_unlock(&group->notification_mutex);
++}
++
++/*
+ * Remove and return the first event from the notification list. It is the
+ * responsibility of the caller to destroy the obtained event
+ */
+diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
+index 6f66b3751ace..53e6c40ed4c6 100644
+--- a/fs/ocfs2/ioctl.c
++++ b/fs/ocfs2/ioctl.c
+@@ -35,9 +35,8 @@
+ copy_to_user((typeof(a) __user *)b, &(a), sizeof(a))
+
+ /*
+- * This call is void because we are already reporting an error that may
+- * be -EFAULT. The error will be returned from the ioctl(2) call. It's
+- * just a best-effort to tell userspace that this request caused the error.
++ * This is just a best-effort to tell userspace that this request
++ * caused the error.
+ */
+ static inline void o2info_set_request_error(struct ocfs2_info_request *kreq,
+ struct ocfs2_info_request __user *req)
+@@ -146,136 +145,105 @@ bail:
+ static int ocfs2_info_handle_blocksize(struct inode *inode,
+ struct ocfs2_info_request __user *req)
+ {
+- int status = -EFAULT;
+ struct ocfs2_info_blocksize oib;
+
+ if (o2info_from_user(oib, req))
+- goto bail;
++ return -EFAULT;
+
+ oib.ib_blocksize = inode->i_sb->s_blocksize;
+
+ o2info_set_request_filled(&oib.ib_req);
+
+ if (o2info_to_user(oib, req))
+- goto bail;
+-
+- status = 0;
+-bail:
+- if (status)
+- o2info_set_request_error(&oib.ib_req, req);
++ return -EFAULT;
+
+- return status;
++ return 0;
+ }
+
+ static int ocfs2_info_handle_clustersize(struct inode *inode,
+ struct ocfs2_info_request __user *req)
+ {
+- int status = -EFAULT;
+ struct ocfs2_info_clustersize oic;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if (o2info_from_user(oic, req))
+- goto bail;
++ return -EFAULT;
+
+ oic.ic_clustersize = osb->s_clustersize;
+
+ o2info_set_request_filled(&oic.ic_req);
+
+ if (o2info_to_user(oic, req))
+- goto bail;
+-
+- status = 0;
+-bail:
+- if (status)
+- o2info_set_request_error(&oic.ic_req, req);
++ return -EFAULT;
+
+- return status;
++ return 0;
+ }
+
+ static int ocfs2_info_handle_maxslots(struct inode *inode,
+ struct ocfs2_info_request __user *req)
+ {
+- int status = -EFAULT;
+ struct ocfs2_info_maxslots oim;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if (o2info_from_user(oim, req))
+- goto bail;
++ return -EFAULT;
+
+ oim.im_max_slots = osb->max_slots;
+
+ o2info_set_request_filled(&oim.im_req);
+
+ if (o2info_to_user(oim, req))
+- goto bail;
++ return -EFAULT;
+
+- status = 0;
+-bail:
+- if (status)
+- o2info_set_request_error(&oim.im_req, req);
+-
+- return status;
++ return 0;
+ }
+
+ static int ocfs2_info_handle_label(struct inode *inode,
+ struct ocfs2_info_request __user *req)
+ {
+- int status = -EFAULT;
+ struct ocfs2_info_label oil;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if (o2info_from_user(oil, req))
+- goto bail;
++ return -EFAULT;
+
+ memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);
+
+ o2info_set_request_filled(&oil.il_req);
+
+ if (o2info_to_user(oil, req))
+- goto bail;
++ return -EFAULT;
+
+- status = 0;
+-bail:
+- if (status)
+- o2info_set_request_error(&oil.il_req, req);
+-
+- return status;
++ return 0;
+ }
+
+ static int ocfs2_info_handle_uuid(struct inode *inode,
+ struct ocfs2_info_request __user *req)
+ {
+- int status = -EFAULT;
+ struct ocfs2_info_uuid oiu;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if (o2info_from_user(oiu, req))
+- goto bail;
++ return -EFAULT;
+
+ memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);
+
+ o2info_set_request_filled(&oiu.iu_req);
+
+ if (o2info_to_user(oiu, req))
+- goto bail;
+-
+- status = 0;
+-bail:
+- if (status)
+- o2info_set_request_error(&oiu.iu_req, req);
++ return -EFAULT;
+
+- return status;
++ return 0;
+ }
+
+ static int ocfs2_info_handle_fs_features(struct inode *inode,
+ struct ocfs2_info_request __user *req)
+ {
+- int status = -EFAULT;
+ struct ocfs2_info_fs_features oif;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if (o2info_from_user(oif, req))
+- goto bail;
++ return -EFAULT;
+
+ oif.if_compat_features = osb->s_feature_compat;
+ oif.if_incompat_features = osb->s_feature_incompat;
+@@ -284,39 +252,28 @@ static int ocfs2_info_handle_fs_features(struct inode *inode,
+ o2info_set_request_filled(&oif.if_req);
+
+ if (o2info_to_user(oif, req))
+- goto bail;
++ return -EFAULT;
+
+- status = 0;
+-bail:
+- if (status)
+- o2info_set_request_error(&oif.if_req, req);
+-
+- return status;
++ return 0;
+ }
+
+ static int ocfs2_info_handle_journal_size(struct inode *inode,
+ struct ocfs2_info_request __user *req)
+ {
+- int status = -EFAULT;
+ struct ocfs2_info_journal_size oij;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if (o2info_from_user(oij, req))
+- goto bail;
++ return -EFAULT;
+
+ oij.ij_journal_size = i_size_read(osb->journal->j_inode);
+
+ o2info_set_request_filled(&oij.ij_req);
+
+ if (o2info_to_user(oij, req))
+- goto bail;
++ return -EFAULT;
+
+- status = 0;
+-bail:
+- if (status)
+- o2info_set_request_error(&oij.ij_req, req);
+-
+- return status;
++ return 0;
+ }
+
+ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
+@@ -373,7 +330,7 @@ static int ocfs2_info_handle_freeinode(struct inode *inode,
+ u32 i;
+ u64 blkno = -1;
+ char namebuf[40];
+- int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE;
++ int status, type = INODE_ALLOC_SYSTEM_INODE;
+ struct ocfs2_info_freeinode *oifi = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct inode *inode_alloc = NULL;
+@@ -385,8 +342,10 @@ static int ocfs2_info_handle_freeinode(struct inode *inode,
+ goto out_err;
+ }
+
+- if (o2info_from_user(*oifi, req))
+- goto bail;
++ if (o2info_from_user(*oifi, req)) {
++ status = -EFAULT;
++ goto out_free;
++ }
+
+ oifi->ifi_slotnum = osb->max_slots;
+
+@@ -424,14 +383,16 @@ static int ocfs2_info_handle_freeinode(struct inode *inode,
+
+ o2info_set_request_filled(&oifi->ifi_req);
+
+- if (o2info_to_user(*oifi, req))
+- goto bail;
++ if (o2info_to_user(*oifi, req)) {
++ status = -EFAULT;
++ goto out_free;
++ }
+
+ status = 0;
+ bail:
+ if (status)
+ o2info_set_request_error(&oifi->ifi_req, req);
+-
++out_free:
+ kfree(oifi);
+ out_err:
+ return status;
+@@ -658,7 +619,7 @@ static int ocfs2_info_handle_freefrag(struct inode *inode,
+ {
+ u64 blkno = -1;
+ char namebuf[40];
+- int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE;
++ int status, type = GLOBAL_BITMAP_SYSTEM_INODE;
+
+ struct ocfs2_info_freefrag *oiff;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+@@ -671,8 +632,10 @@ static int ocfs2_info_handle_freefrag(struct inode *inode,
+ goto out_err;
+ }
+
+- if (o2info_from_user(*oiff, req))
+- goto bail;
++ if (o2info_from_user(*oiff, req)) {
++ status = -EFAULT;
++ goto out_free;
++ }
+ /*
+ * chunksize from userspace should be power of 2.
+ */
+@@ -711,14 +674,14 @@ static int ocfs2_info_handle_freefrag(struct inode *inode,
+
+ if (o2info_to_user(*oiff, req)) {
+ status = -EFAULT;
+- goto bail;
++ goto out_free;
+ }
+
+ status = 0;
+ bail:
+ if (status)
+ o2info_set_request_error(&oiff->iff_req, req);
+-
++out_free:
+ kfree(oiff);
+ out_err:
+ return status;
+@@ -727,23 +690,17 @@ out_err:
+ static int ocfs2_info_handle_unknown(struct inode *inode,
+ struct ocfs2_info_request __user *req)
+ {
+- int status = -EFAULT;
+ struct ocfs2_info_request oir;
+
+ if (o2info_from_user(oir, req))
+- goto bail;
++ return -EFAULT;
+
+ o2info_clear_request_filled(&oir);
+
+ if (o2info_to_user(oir, req))
+- goto bail;
++ return -EFAULT;
+
+- status = 0;
+-bail:
+- if (status)
+- o2info_set_request_error(&oir, req);
+-
+- return status;
++ return 0;
+ }
+
+ /*
+diff --git a/fs/pnode.c b/fs/pnode.c
+index 302bf22c4a30..aae331a5d03b 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -381,6 +381,7 @@ static void __propagate_umount(struct mount *mnt)
+ * other children
+ */
+ if (child && list_empty(&child->mnt_mounts)) {
++ list_del_init(&child->mnt_child);
+ hlist_del_init_rcu(&child->mnt_hash);
+ hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
+ }
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 64db2bceac59..3e1290b0492e 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -297,15 +297,11 @@ static void render_cap_t(struct seq_file *m, const char *header,
+ seq_puts(m, header);
+ CAP_FOR_EACH_U32(__capi) {
+ seq_printf(m, "%08x",
+- a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
++ a->cap[CAP_LAST_U32 - __capi]);
+ }
+ seq_putc(m, '\n');
+ }
+
+-/* Remove non-existent capabilities */
+-#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
+- CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)
+-
+ static inline void task_cap(struct seq_file *m, struct task_struct *p)
+ {
+ const struct cred *cred;
+@@ -319,11 +315,6 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
+ cap_bset = cred->cap_bset;
+ rcu_read_unlock();
+
+- NORM_CAPS(cap_inheritable);
+- NORM_CAPS(cap_permitted);
+- NORM_CAPS(cap_effective);
+- NORM_CAPS(cap_bset);
+-
+ render_cap_t(m, "CapInh:\t", &cap_inheritable);
+ render_cap_t(m, "CapPrm:\t", &cap_permitted);
+ render_cap_t(m, "CapEff:\t", &cap_effective);
+diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
+index 54fdf196bfb2..4d5e5297793f 100644
+--- a/fs/reiserfs/do_balan.c
++++ b/fs/reiserfs/do_balan.c
+@@ -286,12 +286,14 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
+ return 0;
+ }
+
+-static void balance_leaf_insert_left(struct tree_balance *tb,
+- struct item_head *ih, const char *body)
++static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
++ struct item_head *const ih,
++ const char * const body)
+ {
+ int ret;
+ struct buffer_info bi;
+ int n = B_NR_ITEMS(tb->L[0]);
++ unsigned body_shift_bytes = 0;
+
+ if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
+ /* part of new item falls into L[0] */
+@@ -329,7 +331,7 @@ static void balance_leaf_insert_left(struct tree_balance *tb,
+
+ put_ih_item_len(ih, new_item_len);
+ if (tb->lbytes > tb->zeroes_num) {
+- body += (tb->lbytes - tb->zeroes_num);
++ body_shift_bytes = tb->lbytes - tb->zeroes_num;
+ tb->zeroes_num = 0;
+ } else
+ tb->zeroes_num -= tb->lbytes;
+@@ -349,11 +351,12 @@ static void balance_leaf_insert_left(struct tree_balance *tb,
+ tb->insert_size[0] = 0;
+ tb->zeroes_num = 0;
+ }
++ return body_shift_bytes;
+ }
+
+ static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body)
++ struct item_head * const ih,
++ const char * const body)
+ {
+ int n = B_NR_ITEMS(tb->L[0]);
+ struct buffer_info bi;
+@@ -413,17 +416,18 @@ static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
+ tb->pos_in_item -= tb->lbytes;
+ }
+
+-static void balance_leaf_paste_left_shift(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body)
++static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb,
++ struct item_head * const ih,
++ const char * const body)
+ {
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tb->L[0]);
+ struct buffer_info bi;
++ int body_shift_bytes = 0;
+
+ if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
+ balance_leaf_paste_left_shift_dirent(tb, ih, body);
+- return;
++ return 0;
+ }
+
+ RFALSE(tb->lbytes <= 0,
+@@ -497,7 +501,7 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb,
+ * insert_size[0]
+ */
+ if (l_n > tb->zeroes_num) {
+- body += (l_n - tb->zeroes_num);
++ body_shift_bytes = l_n - tb->zeroes_num;
+ tb->zeroes_num = 0;
+ } else
+ tb->zeroes_num -= l_n;
+@@ -526,13 +530,14 @@ static void balance_leaf_paste_left_shift(struct tree_balance *tb,
+ */
+ leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+ }
++ return body_shift_bytes;
+ }
+
+
+ /* appended item will be in L[0] in whole */
+ static void balance_leaf_paste_left_whole(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body)
++ struct item_head * const ih,
++ const char * const body)
+ {
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tb->L[0]);
+@@ -584,39 +589,44 @@ static void balance_leaf_paste_left_whole(struct tree_balance *tb,
+ tb->zeroes_num = 0;
+ }
+
+-static void balance_leaf_paste_left(struct tree_balance *tb,
+- struct item_head *ih, const char *body)
++static unsigned int balance_leaf_paste_left(struct tree_balance *tb,
++ struct item_head * const ih,
++ const char * const body)
+ {
+ /* we must shift the part of the appended item */
+ if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
+- balance_leaf_paste_left_shift(tb, ih, body);
++ return balance_leaf_paste_left_shift(tb, ih, body);
+ else
+ balance_leaf_paste_left_whole(tb, ih, body);
++ return 0;
+ }
+
+ /* Shift lnum[0] items from S[0] to the left neighbor L[0] */
+-static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih,
+- const char *body, int flag)
++static unsigned int balance_leaf_left(struct tree_balance *tb,
++ struct item_head * const ih,
++ const char * const body, int flag)
+ {
+ if (tb->lnum[0] <= 0)
+- return;
++ return 0;
+
+ /* new item or it part falls to L[0], shift it too */
+ if (tb->item_pos < tb->lnum[0]) {
+ BUG_ON(flag != M_INSERT && flag != M_PASTE);
+
+ if (flag == M_INSERT)
+- balance_leaf_insert_left(tb, ih, body);
++ return balance_leaf_insert_left(tb, ih, body);
+ else /* M_PASTE */
+- balance_leaf_paste_left(tb, ih, body);
++ return balance_leaf_paste_left(tb, ih, body);
+ } else
+ /* new item doesn't fall into L[0] */
+ leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
++ return 0;
+ }
+
+
+ static void balance_leaf_insert_right(struct tree_balance *tb,
+- struct item_head *ih, const char *body)
++ struct item_head * const ih,
++ const char * const body)
+ {
+
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+@@ -704,7 +714,8 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
+
+
+ static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
+- struct item_head *ih, const char *body)
++ struct item_head * const ih,
++ const char * const body)
+ {
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ struct buffer_info bi;
+@@ -754,7 +765,8 @@ static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
+ }
+
+ static void balance_leaf_paste_right_shift(struct tree_balance *tb,
+- struct item_head *ih, const char *body)
++ struct item_head * const ih,
++ const char * const body)
+ {
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n_shift, n_rem, r_zeroes_number, version;
+@@ -831,7 +843,8 @@ static void balance_leaf_paste_right_shift(struct tree_balance *tb,
+ }
+
+ static void balance_leaf_paste_right_whole(struct tree_balance *tb,
+- struct item_head *ih, const char *body)
++ struct item_head * const ih,
++ const char * const body)
+ {
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tbS0);
+@@ -874,7 +887,8 @@ static void balance_leaf_paste_right_whole(struct tree_balance *tb,
+ }
+
+ static void balance_leaf_paste_right(struct tree_balance *tb,
+- struct item_head *ih, const char *body)
++ struct item_head * const ih,
++ const char * const body)
+ {
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tbS0);
+@@ -896,8 +910,9 @@ static void balance_leaf_paste_right(struct tree_balance *tb,
+ }
+
+ /* shift rnum[0] items from S[0] to the right neighbor R[0] */
+-static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
+- const char *body, int flag)
++static void balance_leaf_right(struct tree_balance *tb,
++ struct item_head * const ih,
++ const char * const body, int flag)
+ {
+ if (tb->rnum[0] <= 0)
+ return;
+@@ -911,8 +926,8 @@ static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
+ }
+
+ static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body,
++ struct item_head * const ih,
++ const char * const body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int i)
+@@ -1003,8 +1018,8 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
+
+ /* we append to directory item */
+ static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body,
++ struct item_head * const ih,
++ const char * const body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int i)
+@@ -1058,8 +1073,8 @@ static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
+ }
+
+ static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body,
++ struct item_head * const ih,
++ const char * const body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int i)
+@@ -1131,8 +1146,8 @@ static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
+ }
+
+ static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body,
++ struct item_head * const ih,
++ const char * const body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int i)
+@@ -1184,8 +1199,8 @@ static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
+
+ }
+ static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body,
++ struct item_head * const ih,
++ const char * const body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int i)
+@@ -1214,8 +1229,8 @@ static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
+
+ /* Fill new nodes that appear in place of S[0] */
+ static void balance_leaf_new_nodes(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body,
++ struct item_head * const ih,
++ const char * const body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int flag)
+@@ -1254,8 +1269,8 @@ static void balance_leaf_new_nodes(struct tree_balance *tb,
+ }
+
+ static void balance_leaf_finish_node_insert(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body)
++ struct item_head * const ih,
++ const char * const body)
+ {
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ struct buffer_info bi;
+@@ -1271,8 +1286,8 @@ static void balance_leaf_finish_node_insert(struct tree_balance *tb,
+ }
+
+ static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body)
++ struct item_head * const ih,
++ const char * const body)
+ {
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ struct item_head *pasted = item_head(tbS0, tb->item_pos);
+@@ -1305,8 +1320,8 @@ static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
+ }
+
+ static void balance_leaf_finish_node_paste(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body)
++ struct item_head * const ih,
++ const char * const body)
+ {
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ struct buffer_info bi;
+@@ -1349,8 +1364,8 @@ static void balance_leaf_finish_node_paste(struct tree_balance *tb,
+ * of the affected item which remains in S
+ */
+ static void balance_leaf_finish_node(struct tree_balance *tb,
+- struct item_head *ih,
+- const char *body, int flag)
++ struct item_head * const ih,
++ const char * const body, int flag)
+ {
+ /* if we must insert or append into buffer S[0] */
+ if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
+@@ -1402,7 +1417,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih,
+ && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
+ tb->pos_in_item *= UNFM_P_SIZE;
+
+- balance_leaf_left(tb, ih, body, flag);
++ body += balance_leaf_left(tb, ih, body, flag);
+
+ /* tb->lnum[0] > 0 */
+ /* Calculate new item position */
+diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
+index e8870de4627e..a88b1b3e7db3 100644
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -1947,8 +1947,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
+ }
+ }
+
+- /* wait for all commits to finish */
+- cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
+
+ /*
+ * We must release the write lock here because
+@@ -1956,8 +1954,14 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
+ */
+ reiserfs_write_unlock(sb);
+
++ /*
++ * Cancel flushing of old commits. Note that neither of these works
++ * will be requeued because superblock is being shutdown and doesn't
++ * have MS_ACTIVE set.
++ */
+ cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
+- flush_workqueue(REISERFS_SB(sb)->commit_wq);
++ /* wait for all commits to finish */
++ cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
+
+ free_journal_ram(sb);
+
+@@ -4292,9 +4296,15 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
+ if (flush) {
+ flush_commit_list(sb, jl, 1);
+ flush_journal_list(sb, jl, 1);
+- } else if (!(jl->j_state & LIST_COMMIT_PENDING))
+- queue_delayed_work(REISERFS_SB(sb)->commit_wq,
+- &journal->j_work, HZ / 10);
++ } else if (!(jl->j_state & LIST_COMMIT_PENDING)) {
++ /*
++ * Avoid queueing work when sb is being shut down. Transaction
++ * will be flushed on journal shutdown.
++ */
++ if (sb->s_flags & MS_ACTIVE)
++ queue_delayed_work(REISERFS_SB(sb)->commit_wq,
++ &journal->j_work, HZ / 10);
++ }
+
+ /*
+ * if the next transaction has any chance of wrapping, flush
+diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
+index d6744c8b24e1..3a74d15eb814 100644
+--- a/fs/reiserfs/lbalance.c
++++ b/fs/reiserfs/lbalance.c
+@@ -899,8 +899,9 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first,
+
+ /* insert item into the leaf node in position before */
+ void leaf_insert_into_buf(struct buffer_info *bi, int before,
+- struct item_head *inserted_item_ih,
+- const char *inserted_item_body, int zeros_number)
++ struct item_head * const inserted_item_ih,
++ const char * const inserted_item_body,
++ int zeros_number)
+ {
+ struct buffer_head *bh = bi->bi_bh;
+ int nr, free_space;
+diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
+index bf53888c7f59..735c2c2b4536 100644
+--- a/fs/reiserfs/reiserfs.h
++++ b/fs/reiserfs/reiserfs.h
+@@ -3216,11 +3216,12 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes);
+ void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first,
+ int del_num, int del_bytes);
+ void leaf_insert_into_buf(struct buffer_info *bi, int before,
+- struct item_head *inserted_item_ih,
+- const char *inserted_item_body, int zeros_number);
+-void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
+- int pos_in_item, int paste_size, const char *body,
++ struct item_head * const inserted_item_ih,
++ const char * const inserted_item_body,
+ int zeros_number);
++void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
++ int pos_in_item, int paste_size,
++ const char * const body, int zeros_number);
+ void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
+ int pos_in_item, int cut_size);
+ void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index a392cef6acc6..5fd8f57e07fc 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -100,7 +100,11 @@ void reiserfs_schedule_old_flush(struct super_block *s)
+ struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+ unsigned long delay;
+
+- if (s->s_flags & MS_RDONLY)
++ /*
++ * Avoid scheduling flush when sb is being shut down. It can race
++ * with journal shutdown and free still queued delayed work.
++ */
++ if (s->s_flags & MS_RDONLY || !(s->s_flags & MS_ACTIVE))
+ return;
+
+ spin_lock(&sbi->old_work_lock);
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index faaf716e2080..02614349690d 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -1753,11 +1753,72 @@ xfs_vm_readpages(
+ return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
+ }
+
++/*
++ * This is basically a copy of __set_page_dirty_buffers() with one
++ * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
++ * dirty, we'll never be able to clean them because we don't write buffers
++ * beyond EOF, and that means we can't invalidate pages that span EOF
++ * that have been marked dirty. Further, the dirty state can leak into
++ * the file interior if the file is extended, resulting in all sorts of
++ * bad things happening as the state does not match the underlying data.
++ *
++ * XXX: this really indicates that bufferheads in XFS need to die. Warts like
++ * this only exist because of bufferheads and how the generic code manages them.
++ */
++STATIC int
++xfs_vm_set_page_dirty(
++ struct page *page)
++{
++ struct address_space *mapping = page->mapping;
++ struct inode *inode = mapping->host;
++ loff_t end_offset;
++ loff_t offset;
++ int newly_dirty;
++
++ if (unlikely(!mapping))
++ return !TestSetPageDirty(page);
++
++ end_offset = i_size_read(inode);
++ offset = page_offset(page);
++
++ spin_lock(&mapping->private_lock);
++ if (page_has_buffers(page)) {
++ struct buffer_head *head = page_buffers(page);
++ struct buffer_head *bh = head;
++
++ do {
++ if (offset < end_offset)
++ set_buffer_dirty(bh);
++ bh = bh->b_this_page;
++ offset += 1 << inode->i_blkbits;
++ } while (bh != head);
++ }
++ newly_dirty = !TestSetPageDirty(page);
++ spin_unlock(&mapping->private_lock);
++
++ if (newly_dirty) {
++ /* sigh - __set_page_dirty() is static, so copy it here, too */
++ unsigned long flags;
++
++ spin_lock_irqsave(&mapping->tree_lock, flags);
++ if (page->mapping) { /* Race with truncate? */
++ WARN_ON_ONCE(!PageUptodate(page));
++ account_page_dirtied(page, mapping);
++ radix_tree_tag_set(&mapping->page_tree,
++ page_index(page), PAGECACHE_TAG_DIRTY);
++ }
++ spin_unlock_irqrestore(&mapping->tree_lock, flags);
++ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
++ }
++ return newly_dirty;
++}
++
+ const struct address_space_operations xfs_address_space_operations = {
+ .readpage = xfs_vm_readpage,
+ .readpages = xfs_vm_readpages,
+ .writepage = xfs_vm_writepage,
+ .writepages = xfs_vm_writepages,
++ .set_page_dirty = xfs_vm_set_page_dirty,
+ .releasepage = xfs_vm_releasepage,
+ .invalidatepage = xfs_vm_invalidatepage,
+ .write_begin = xfs_vm_write_begin,
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index 3ee0cd43edc0..c9656491d823 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -974,7 +974,8 @@ xfs_qm_dqflush(
+ * Get the buffer containing the on-disk dquot
+ */
+ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
+- mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
++ mp->m_quotainfo->qi_dqchunklen, 0, &bp,
++ &xfs_dquot_buf_ops);
+ if (error)
+ goto out_unlock;
+
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index 1f66779d7a46..055459999660 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -295,7 +295,16 @@ xfs_file_read_iter(
+ xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
+ return ret;
+ }
+- truncate_pagecache_range(VFS_I(ip), pos, -1);
++
++ /*
++ * Invalidate whole pages. This can return an error if
++ * we fail to invalidate a page, but this should never
++ * happen on XFS. Warn if it does fail.
++ */
++ ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
++ pos >> PAGE_CACHE_SHIFT, -1);
++ WARN_ON_ONCE(ret);
++ ret = 0;
+ }
+ xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+ }
+@@ -634,7 +643,15 @@ xfs_file_dio_aio_write(
+ pos, -1);
+ if (ret)
+ goto out;
+- truncate_pagecache_range(VFS_I(ip), pos, -1);
++ /*
++ * Invalidate whole pages. This can return an error if
++ * we fail to invalidate a page, but this should never
++ * happen on XFS. Warn if it does fail.
++ */
++ ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
++ pos >> PAGE_CACHE_SHIFT, -1);
++ WARN_ON_ONCE(ret);
++ ret = 0;
+ }
+
+ /*
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 981af0f6504b..8c962890fe17 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2125,6 +2125,17 @@ xlog_recover_validate_buf_type(
+ __uint16_t magic16;
+ __uint16_t magicda;
+
++ /*
++ * We can only do post recovery validation on items on CRC enabled
++ * fielsystems as we need to know when the buffer was written to be able
++ * to determine if we should have replayed the item. If we replay old
++ * metadata over a newer buffer, then it will enter a temporarily
++ * inconsistent state resulting in verification failures. Hence for now
++ * just avoid the verification stage for non-crc filesystems
++ */
++ if (!xfs_sb_version_hascrc(&mp->m_sb))
++ return;
++
+ magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
+ magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
+ magicda = be16_to_cpu(info->magic);
+@@ -2162,8 +2173,6 @@ xlog_recover_validate_buf_type(
+ bp->b_ops = &xfs_agf_buf_ops;
+ break;
+ case XFS_BLFT_AGFL_BUF:
+- if (!xfs_sb_version_hascrc(&mp->m_sb))
+- break;
+ if (magic32 != XFS_AGFL_MAGIC) {
+ xfs_warn(mp, "Bad AGFL block magic!");
+ ASSERT(0);
+@@ -2196,10 +2205,6 @@ xlog_recover_validate_buf_type(
+ #endif
+ break;
+ case XFS_BLFT_DINO_BUF:
+- /*
+- * we get here with inode allocation buffers, not buffers that
+- * track unlinked list changes.
+- */
+ if (magic16 != XFS_DINODE_MAGIC) {
+ xfs_warn(mp, "Bad INODE block magic!");
+ ASSERT(0);
+@@ -2279,8 +2284,6 @@ xlog_recover_validate_buf_type(
+ bp->b_ops = &xfs_attr3_leaf_buf_ops;
+ break;
+ case XFS_BLFT_ATTR_RMT_BUF:
+- if (!xfs_sb_version_hascrc(&mp->m_sb))
+- break;
+ if (magic32 != XFS_ATTR3_RMT_MAGIC) {
+ xfs_warn(mp, "Bad attr remote magic!");
+ ASSERT(0);
+@@ -2387,16 +2390,7 @@ xlog_recover_do_reg_buffer(
+ /* Shouldn't be any more regions */
+ ASSERT(i == item->ri_total);
+
+- /*
+- * We can only do post recovery validation on items on CRC enabled
+- * fielsystems as we need to know when the buffer was written to be able
+- * to determine if we should have replayed the item. If we replay old
+- * metadata over a newer buffer, then it will enter a temporarily
+- * inconsistent state resulting in verification failures. Hence for now
+- * just avoid the verification stage for non-crc filesystems
+- */
+- if (xfs_sb_version_hascrc(&mp->m_sb))
+- xlog_recover_validate_buf_type(mp, bp, buf_f);
++ xlog_recover_validate_buf_type(mp, bp, buf_f);
+ }
+
+ /*
+@@ -2504,12 +2498,29 @@ xlog_recover_buffer_pass2(
+ }
+
+ /*
+- * recover the buffer only if we get an LSN from it and it's less than
++ * Recover the buffer only if we get an LSN from it and it's less than
+ * the lsn of the transaction we are replaying.
++ *
++ * Note that we have to be extremely careful of readahead here.
++ * Readahead does not attach verfiers to the buffers so if we don't
++ * actually do any replay after readahead because of the LSN we found
++ * in the buffer if more recent than that current transaction then we
++ * need to attach the verifier directly. Failure to do so can lead to
++ * future recovery actions (e.g. EFI and unlinked list recovery) can
++ * operate on the buffers and they won't get the verifier attached. This
++ * can lead to blocks on disk having the correct content but a stale
++ * CRC.
++ *
++ * It is safe to assume these clean buffers are currently up to date.
++ * If the buffer is dirtied by a later transaction being replayed, then
++ * the verifier will be reset to match whatever recover turns that
++ * buffer into.
+ */
+ lsn = xlog_recover_get_buf_lsn(mp, bp);
+- if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
++ if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
++ xlog_recover_validate_buf_type(mp, bp, buf_f);
+ goto out_release;
++ }
+
+ if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
+ error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index 6d26759c779a..6c51e2f97c0a 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -1005,6 +1005,12 @@ xfs_qm_dqiter_bufs(
+ if (error)
+ break;
+
++ /*
++ * A corrupt buffer might not have a verifier attached, so
++ * make sure we have the correct one attached before writeback
++ * occurs.
++ */
++ bp->b_ops = &xfs_dquot_buf_ops;
+ xfs_qm_reset_dqcounts(mp, bp, firstid, type);
+ xfs_buf_delwri_queue(bp, buffer_list);
+ xfs_buf_relse(bp);
+@@ -1090,7 +1096,7 @@ xfs_qm_dqiterate(
+ xfs_buf_readahead(mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp, rablkno),
+ mp->m_quotainfo->qi_dqchunklen,
+- NULL);
++ &xfs_dquot_buf_ops);
+ rablkno++;
+ }
+ }
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index b5714580801a..0826a4407e8e 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -246,7 +246,6 @@ struct acpi_device_pnp {
+ acpi_device_name device_name; /* Driver-determined */
+ acpi_device_class device_class; /* " */
+ union acpi_object *str_obj; /* unicode string for _STR method */
+- unsigned long sun; /* _SUN */
+ };
+
+ #define acpi_device_bid(d) ((d)->pnp.bus_id)
+diff --git a/include/linux/capability.h b/include/linux/capability.h
+index 84b13ad67c1c..aa93e5ef594c 100644
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set;
+ # error Fix up hand-coded capability macro initializers
+ #else /* HAND-CODED capability initializers */
+
++#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
++#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
++
+ # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
+-# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
++# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
+ # define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
+ | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
+ CAP_FS_MASK_B1 } })
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index fc7718c6bd3e..d2be2526ec48 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -326,6 +326,8 @@ extern int fsnotify_add_notify_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct list_head *,
+ struct fsnotify_event *));
++/* Remove passed event from groups notification queue */
++extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
+ /* true if the group notification queue is empty */
+ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
+ /* return, but do not dequeue the first event on the notification queue */
+diff --git a/include/linux/mount.h b/include/linux/mount.h
+index 839bac270904..b0c1e6574e7f 100644
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -42,13 +42,20 @@ struct mnt_namespace;
+ * flag, consider how it interacts with shared mounts.
+ */
+ #define MNT_SHARED_MASK (MNT_UNBINDABLE)
+-#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
++#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
++ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
++ | MNT_READONLY)
++#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
+
+ #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
+
+ #define MNT_INTERNAL 0x4000
+
++#define MNT_LOCK_ATIME 0x040000
++#define MNT_LOCK_NOEXEC 0x080000
++#define MNT_LOCK_NOSUID 0x100000
++#define MNT_LOCK_NODEV 0x200000
+ #define MNT_LOCK_READONLY 0x400000
+ #define MNT_LOCKED 0x800000
+ #define MNT_DOOMED 0x1000000
+diff --git a/include/linux/tpm.h b/include/linux/tpm.h
+index fff1d0976f80..8350c538b486 100644
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -39,6 +39,9 @@ struct tpm_class_ops {
+ int (*send) (struct tpm_chip *chip, u8 *buf, size_t len);
+ void (*cancel) (struct tpm_chip *chip);
+ u8 (*status) (struct tpm_chip *chip);
++ bool (*update_timeouts)(struct tpm_chip *chip,
++ unsigned long *timeout_cap);
++
+ };
+
+ #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 27ab31017f09..758bc9f0f399 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -155,6 +155,7 @@ struct scsi_device {
+ unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */
+ unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */
+ unsigned skip_vpd_pages:1; /* do not read VPD pages */
++ unsigned try_vpd_pages:1; /* attempt to read VPD pages */
+ unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
+ unsigned no_start_on_add:1; /* do not issue start on add */
+ unsigned allow_restart:1; /* issue START_UNIT in error handler */
+diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
+index 447d2d7466fc..183eaab7c380 100644
+--- a/include/scsi/scsi_devinfo.h
++++ b/include/scsi/scsi_devinfo.h
+@@ -32,4 +32,9 @@
+ #define BLIST_ATTACH_PQ3 0x1000000 /* Scan: Attach to PQ3 devices */
+ #define BLIST_NO_DIF 0x2000000 /* Disable T10 PI (DIF) */
+ #define BLIST_SKIP_VPD_PAGES 0x4000000 /* Ignore SBC-3 VPD pages */
++#define BLIST_SCSI3LUN 0x8000000 /* Scan more than 256 LUNs
++ for sequential scan */
++#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
++#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
++
+ #endif
+diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
+index 99b80abf360a..3066718eb120 100644
+--- a/include/uapi/rdma/rdma_user_cm.h
++++ b/include/uapi/rdma/rdma_user_cm.h
+@@ -34,6 +34,7 @@
+ #define RDMA_USER_CM_H
+
+ #include <linux/types.h>
++#include <linux/socket.h>
+ #include <linux/in6.h>
+ #include <rdma/ib_user_verbs.h>
+ #include <rdma/ib_user_sa.h>
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 3ef2e0e797e8..ba2ff5a5c600 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1677,7 +1677,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
+ audit_log_format(ab, " %s=", prefix);
+ CAP_FOR_EACH_U32(i) {
+ audit_log_format(ab, "%08x",
+- cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
++ cap->cap[CAP_LAST_U32 - i]);
+ }
+ }
+
+diff --git a/kernel/capability.c b/kernel/capability.c
+index a5cf13c018ce..989f5bfc57dc 100644
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -258,6 +258,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
+ i++;
+ }
+
++ effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++ permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++ inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 80c33f8de14f..86e59ee8dd76 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -661,7 +661,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ if (cond_func(cpu, info)) {
+ ret = smp_call_function_single(cpu, func,
+ info, wait);
+- WARN_ON_ONCE(!ret);
++ WARN_ON_ONCE(ret);
+ }
+ preempt_enable();
+ }
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index ff7027199a9a..b95381ebdd5e 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1984,7 +1984,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
+
+ /**
+ * rb_update_event - update event type and data
+- * @event: the even to update
++ * @event: the event to update
+ * @type: the type of event
+ * @length: the size of the event field in the ring buffer
+ *
+@@ -3357,21 +3357,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
+ struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+
+ /* Iterator usage is expected to have record disabled */
+- if (list_empty(&cpu_buffer->reader_page->list)) {
+- iter->head_page = rb_set_head_page(cpu_buffer);
+- if (unlikely(!iter->head_page))
+- return;
+- iter->head = iter->head_page->read;
+- } else {
+- iter->head_page = cpu_buffer->reader_page;
+- iter->head = cpu_buffer->reader_page->read;
+- }
++ iter->head_page = cpu_buffer->reader_page;
++ iter->head = cpu_buffer->reader_page->read;
++
++ iter->cache_reader_page = iter->head_page;
++ iter->cache_read = iter->head;
++
+ if (iter->head)
+ iter->read_stamp = cpu_buffer->read_stamp;
+ else
+ iter->read_stamp = iter->head_page->page->time_stamp;
+- iter->cache_reader_page = cpu_buffer->reader_page;
+- iter->cache_read = cpu_buffer->read;
+ }
+
+ /**
+@@ -3764,12 +3759,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+ return NULL;
+
+ /*
+- * We repeat when a time extend is encountered.
+- * Since the time extend is always attached to a data event,
+- * we should never loop more than once.
+- * (We never hit the following condition more than twice).
++ * We repeat when a time extend is encountered or we hit
++ * the end of the page. Since the time extend is always attached
++ * to a data event, we should never loop more than three times.
++ * Once for going to next page, once on time extend, and
++ * finally once to get the event.
++ * (We never hit the following condition more than thrice).
+ */
+- if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
++ if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
+ return NULL;
+
+ if (rb_per_cpu_empty(cpu_buffer))
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index c0b1007011e1..2404d03e251a 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -1723,11 +1723,13 @@ ascend_old_tree:
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ slot = shortcut->parent_slot;
+ cursor = shortcut->back_pointer;
++ if (!cursor)
++ goto gc_complete;
+ } else {
+ slot = node->parent_slot;
+ cursor = ptr;
+ }
+- BUG_ON(!ptr);
++ BUG_ON(!cursor);
+ node = assoc_array_ptr_to_node(cursor);
+ slot++;
+ goto continue_node;
+@@ -1735,7 +1737,7 @@ ascend_old_tree:
+ gc_complete:
+ edit->set[0].to = new_root;
+ assoc_array_apply_edit(edit);
+- edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
++ array->nr_leaves_on_tree = nr_leaves_on_tree;
+ return 0;
+
+ enomem:
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 900edfaf6df5..8163e0439493 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2584,7 +2584,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ * that this differs from normal direct-io semantics, which
+ * will return -EFOO even if some bytes were written.
+ */
+- if (unlikely(status < 0) && !written) {
++ if (unlikely(status < 0)) {
+ err = status;
+ goto out;
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 7a0a73d2fcff..7ae54449f252 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1089,6 +1089,9 @@ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+ unsigned long pfn;
+ struct hstate *h;
+
++ if (!hugepages_supported())
++ return;
++
+ /* Set scan step to minimum hugepage size */
+ for_each_hstate(h)
+ if (order > huge_page_order(h))
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 640c54ec1bd2..3787be160c2b 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -3538,18 +3538,14 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+
+ /* If we are initiators, there is no remote information yet */
+ if (conn->remote_auth == 0xff) {
+- cp.authentication = conn->auth_type;
+-
+ /* Request MITM protection if our IO caps allow it
+ * except for the no-bonding case.
+- * conn->auth_type is not updated here since
+- * that might cause the user confirmation to be
+- * rejected in case the remote doesn't have the
+- * IO capabilities for MITM.
+ */
+ if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
+- cp.authentication != HCI_AT_NO_BONDING)
+- cp.authentication |= 0x01;
++ conn->auth_type != HCI_AT_NO_BONDING)
++ conn->auth_type |= 0x01;
++
++ cp.authentication = conn->auth_type;
+ } else {
+ conn->auth_type = hci_get_auth_req(conn);
+ cp.authentication = conn->auth_type;
+@@ -3621,9 +3617,12 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
+ rem_mitm = (conn->remote_auth & 0x01);
+
+ /* If we require MITM but the remote device can't provide that
+- * (it has NoInputNoOutput) then reject the confirmation request
++ * (it has NoInputNoOutput) then reject the confirmation
++ * request. We check the security level here since it doesn't
++ * necessarily match conn->auth_type.
+ */
+- if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
++ if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
++ conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
+ BT_DBG("Rejecting request: remote device can't provide MITM");
+ hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
+ sizeof(ev->bdaddr), &ev->bdaddr);
+@@ -4177,8 +4176,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+ * sending a merged device found event.
+ */
+ mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+- d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
+- d->last_adv_data, d->last_adv_data_len);
++ d->last_adv_addr_type, NULL, rssi, 0, 1,
++ d->last_adv_data, d->last_adv_data_len, data, len);
+ clear_pending_adv_report(hdev);
+ }
+
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index e1378693cc90..d0fd8b04f2e6 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1111,7 +1111,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
+ l2cap_chan_close(chan, 0);
+ lock_sock(sk);
+
+- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
++ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++ !(current->flags & PF_EXITING))
+ err = bt_sock_wait_state(sk, BT_CLOSED,
+ sk->sk_lingertime);
+ }
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index 754b6fe4f742..881f7de412cc 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -1909,10 +1909,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
+ /* Get data directly from socket receive queue without copying it. */
+ while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
+ skb_orphan(skb);
+- if (!skb_linearize(skb))
++ if (!skb_linearize(skb)) {
+ s = rfcomm_recv_frame(s, skb);
+- else
++ if (!s)
++ break;
++ } else {
+ kfree_skb(skb);
++ }
+ }
+
+ if (s && (sk->sk_state == BT_CLOSED))
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index c603a5eb4720..8bbbb5ec468c 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -918,7 +918,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ __rfcomm_sock_close(sk);
+
+- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
++ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++ !(current->flags & PF_EXITING))
+ err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+ }
+ release_sock(sk);
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index c06dbd3938e8..dbbbc0292bd0 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -909,7 +909,8 @@ static int sco_sock_shutdown(struct socket *sock, int how)
+ sco_sock_clear_timer(sk);
+ __sco_sock_close(sk);
+
+- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
++ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++ !(current->flags & PF_EXITING))
+ err = bt_sock_wait_state(sk, BT_CLOSED,
+ sk->sk_lingertime);
+ }
+@@ -929,7 +930,8 @@ static int sco_sock_release(struct socket *sock)
+
+ sco_sock_close(sk);
+
+- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
++ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++ !(current->flags & PF_EXITING)) {
+ lock_sock(sk);
+ err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+ release_sock(sk);
+diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
+index 96238ba95f2b..de6662b14e1f 100644
+--- a/net/ceph/auth_x.c
++++ b/net/ceph/auth_x.c
+@@ -13,8 +13,6 @@
+ #include "auth_x.h"
+ #include "auth_x_protocol.h"
+
+-#define TEMP_TICKET_BUF_LEN 256
+-
+ static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
+
+ static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
+@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
+ }
+
+ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
+- void **p, void *end, void *obuf, size_t olen)
++ void **p, void *end, void **obuf, size_t olen)
+ {
+ struct ceph_x_encrypt_header head;
+ size_t head_len = sizeof(head);
+@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
+ return -EINVAL;
+
+ dout("ceph_x_decrypt len %d\n", len);
+- ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
+- *p, len);
++ if (*obuf == NULL) {
++ *obuf = kmalloc(len, GFP_NOFS);
++ if (!*obuf)
++ return -ENOMEM;
++ olen = len;
++ }
++
++ ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
+ if (ret)
+ return ret;
+ if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
+@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
+ kfree(th);
+ }
+
+-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+- struct ceph_crypto_key *secret,
+- void *buf, void *end)
++static int process_one_ticket(struct ceph_auth_client *ac,
++ struct ceph_crypto_key *secret,
++ void **p, void *end)
+ {
+ struct ceph_x_info *xi = ac->private;
+- int num;
+- void *p = buf;
++ int type;
++ u8 tkt_struct_v, blob_struct_v;
++ struct ceph_x_ticket_handler *th;
++ void *dbuf = NULL;
++ void *dp, *dend;
++ int dlen;
++ char is_enc;
++ struct timespec validity;
++ struct ceph_crypto_key old_key;
++ void *ticket_buf = NULL;
++ void *tp, *tpend;
++ struct ceph_timespec new_validity;
++ struct ceph_crypto_key new_session_key;
++ struct ceph_buffer *new_ticket_blob;
++ unsigned long new_expires, new_renew_after;
++ u64 new_secret_id;
+ int ret;
+- char *dbuf;
+- char *ticket_buf;
+- u8 reply_struct_v;
+
+- dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
+- if (!dbuf)
+- return -ENOMEM;
++ ceph_decode_need(p, end, sizeof(u32) + 1, bad);
+
+- ret = -ENOMEM;
+- ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
+- if (!ticket_buf)
+- goto out_dbuf;
++ type = ceph_decode_32(p);
++ dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
+
+- ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
+- reply_struct_v = ceph_decode_8(&p);
+- if (reply_struct_v != 1)
++ tkt_struct_v = ceph_decode_8(p);
++ if (tkt_struct_v != 1)
+ goto bad;
+- num = ceph_decode_32(&p);
+- dout("%d tickets\n", num);
+- while (num--) {
+- int type;
+- u8 tkt_struct_v, blob_struct_v;
+- struct ceph_x_ticket_handler *th;
+- void *dp, *dend;
+- int dlen;
+- char is_enc;
+- struct timespec validity;
+- struct ceph_crypto_key old_key;
+- void *tp, *tpend;
+- struct ceph_timespec new_validity;
+- struct ceph_crypto_key new_session_key;
+- struct ceph_buffer *new_ticket_blob;
+- unsigned long new_expires, new_renew_after;
+- u64 new_secret_id;
+-
+- ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
+-
+- type = ceph_decode_32(&p);
+- dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
+-
+- tkt_struct_v = ceph_decode_8(&p);
+- if (tkt_struct_v != 1)
+- goto bad;
+-
+- th = get_ticket_handler(ac, type);
+- if (IS_ERR(th)) {
+- ret = PTR_ERR(th);
+- goto out;
+- }
+
+- /* blob for me */
+- dlen = ceph_x_decrypt(secret, &p, end, dbuf,
+- TEMP_TICKET_BUF_LEN);
+- if (dlen <= 0) {
+- ret = dlen;
+- goto out;
+- }
+- dout(" decrypted %d bytes\n", dlen);
+- dend = dbuf + dlen;
+- dp = dbuf;
++ th = get_ticket_handler(ac, type);
++ if (IS_ERR(th)) {
++ ret = PTR_ERR(th);
++ goto out;
++ }
+
+- tkt_struct_v = ceph_decode_8(&dp);
+- if (tkt_struct_v != 1)
+- goto bad;
++ /* blob for me */
++ dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
++ if (dlen <= 0) {
++ ret = dlen;
++ goto out;
++ }
++ dout(" decrypted %d bytes\n", dlen);
++ dp = dbuf;
++ dend = dp + dlen;
+
+- memcpy(&old_key, &th->session_key, sizeof(old_key));
+- ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+- if (ret)
+- goto out;
++ tkt_struct_v = ceph_decode_8(&dp);
++ if (tkt_struct_v != 1)
++ goto bad;
+
+- ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+- ceph_decode_timespec(&validity, &new_validity);
+- new_expires = get_seconds() + validity.tv_sec;
+- new_renew_after = new_expires - (validity.tv_sec / 4);
+- dout(" expires=%lu renew_after=%lu\n", new_expires,
+- new_renew_after);
++ memcpy(&old_key, &th->session_key, sizeof(old_key));
++ ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
++ if (ret)
++ goto out;
+
+- /* ticket blob for service */
+- ceph_decode_8_safe(&p, end, is_enc, bad);
+- tp = ticket_buf;
+- if (is_enc) {
+- /* encrypted */
+- dout(" encrypted ticket\n");
+- dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
+- TEMP_TICKET_BUF_LEN);
+- if (dlen < 0) {
+- ret = dlen;
+- goto out;
+- }
+- dlen = ceph_decode_32(&tp);
+- } else {
+- /* unencrypted */
+- ceph_decode_32_safe(&p, end, dlen, bad);
+- ceph_decode_need(&p, end, dlen, bad);
+- ceph_decode_copy(&p, ticket_buf, dlen);
++ ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
++ ceph_decode_timespec(&validity, &new_validity);
++ new_expires = get_seconds() + validity.tv_sec;
++ new_renew_after = new_expires - (validity.tv_sec / 4);
++ dout(" expires=%lu renew_after=%lu\n", new_expires,
++ new_renew_after);
++
++ /* ticket blob for service */
++ ceph_decode_8_safe(p, end, is_enc, bad);
++ if (is_enc) {
++ /* encrypted */
++ dout(" encrypted ticket\n");
++ dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
++ if (dlen < 0) {
++ ret = dlen;
++ goto out;
+ }
+- tpend = tp + dlen;
+- dout(" ticket blob is %d bytes\n", dlen);
+- ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
+- blob_struct_v = ceph_decode_8(&tp);
+- new_secret_id = ceph_decode_64(&tp);
+- ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+- if (ret)
++ tp = ticket_buf;
++ dlen = ceph_decode_32(&tp);
++ } else {
++ /* unencrypted */
++ ceph_decode_32_safe(p, end, dlen, bad);
++ ticket_buf = kmalloc(dlen, GFP_NOFS);
++ if (!ticket_buf) {
++ ret = -ENOMEM;
+ goto out;
+-
+- /* all is well, update our ticket */
+- ceph_crypto_key_destroy(&th->session_key);
+- if (th->ticket_blob)
+- ceph_buffer_put(th->ticket_blob);
+- th->session_key = new_session_key;
+- th->ticket_blob = new_ticket_blob;
+- th->validity = new_validity;
+- th->secret_id = new_secret_id;
+- th->expires = new_expires;
+- th->renew_after = new_renew_after;
+- dout(" got ticket service %d (%s) secret_id %lld len %d\n",
+- type, ceph_entity_type_name(type), th->secret_id,
+- (int)th->ticket_blob->vec.iov_len);
+- xi->have_keys |= th->service;
++ }
++ tp = ticket_buf;
++ ceph_decode_need(p, end, dlen, bad);
++ ceph_decode_copy(p, ticket_buf, dlen);
+ }
++ tpend = tp + dlen;
++ dout(" ticket blob is %d bytes\n", dlen);
++ ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
++ blob_struct_v = ceph_decode_8(&tp);
++ new_secret_id = ceph_decode_64(&tp);
++ ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
++ if (ret)
++ goto out;
++
++ /* all is well, update our ticket */
++ ceph_crypto_key_destroy(&th->session_key);
++ if (th->ticket_blob)
++ ceph_buffer_put(th->ticket_blob);
++ th->session_key = new_session_key;
++ th->ticket_blob = new_ticket_blob;
++ th->validity = new_validity;
++ th->secret_id = new_secret_id;
++ th->expires = new_expires;
++ th->renew_after = new_renew_after;
++ dout(" got ticket service %d (%s) secret_id %lld len %d\n",
++ type, ceph_entity_type_name(type), th->secret_id,
++ (int)th->ticket_blob->vec.iov_len);
++ xi->have_keys |= th->service;
+
+- ret = 0;
+ out:
+ kfree(ticket_buf);
+-out_dbuf:
+ kfree(dbuf);
+ return ret;
+
+@@ -270,6 +255,34 @@ bad:
+ goto out;
+ }
+
++static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
++ struct ceph_crypto_key *secret,
++ void *buf, void *end)
++{
++ void *p = buf;
++ u8 reply_struct_v;
++ u32 num;
++ int ret;
++
++ ceph_decode_8_safe(&p, end, reply_struct_v, bad);
++ if (reply_struct_v != 1)
++ return -EINVAL;
++
++ ceph_decode_32_safe(&p, end, num, bad);
++ dout("%d tickets\n", num);
++
++ while (num--) {
++ ret = process_one_ticket(ac, secret, &p, end);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++
++bad:
++ return -EINVAL;
++}
++
+ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
+ struct ceph_x_ticket_handler *th,
+ struct ceph_x_authorizer *au)
+@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
+ struct ceph_x_ticket_handler *th;
+ int ret = 0;
+ struct ceph_x_authorize_reply reply;
++ void *preply = &reply;
+ void *p = au->reply_buf;
+ void *end = p + sizeof(au->reply_buf);
+
+ th = get_ticket_handler(ac, au->service);
+ if (IS_ERR(th))
+ return PTR_ERR(th);
+- ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
++ ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(reply))
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 1948d592aa54..3d9ddc2842e1 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -900,7 +900,7 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
+ BUG_ON(page_count > (int)USHRT_MAX);
+ cursor->page_count = (unsigned short)page_count;
+ BUG_ON(length > SIZE_MAX - cursor->page_offset);
+- cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE;
++ cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
+ }
+
+ static struct page *
+diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
+index 067d3af2eaf6..61fcfc304f68 100644
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -1181,7 +1181,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
+ if (!m) {
+ pr_info("alloc_msg unknown type %d\n", type);
+ *skip = 1;
++ } else if (front_len > m->front_alloc_len) {
++ pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
++ front_len, m->front_alloc_len,
++ (unsigned int)con->peer_name.type,
++ le64_to_cpu(con->peer_name.num));
++ ceph_msg_put(m);
++ m = ceph_msg_new(type, front_len, GFP_NOFS, false);
+ }
++
+ return m;
+ }
+
+diff --git a/security/commoncap.c b/security/commoncap.c
+index b9d613e0ef14..963dc5981661 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -421,6 +421,9 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
+ cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
+ }
+
++ cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++ cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++
+ return 0;
+ }
+
+diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
+index a3881c4381c9..bcf591373a7a 100644
+--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
++++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
+@@ -290,19 +290,19 @@ static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
+ unsigned int sample_size = runtime->sample_bits / 8;
+ void *buf = runtime->dma_area;
+ struct bf5xx_i2s_pcm_data *dma_data;
+- unsigned int offset, size;
++ unsigned int offset, samples;
+
+ dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+ if (dma_data->tdm_mode) {
+ offset = pos * 8 * sample_size;
+- size = count * 8 * sample_size;
++ samples = count * 8;
+ } else {
+ offset = frames_to_bytes(runtime, pos);
+- size = frames_to_bytes(runtime, count);
++ samples = count * runtime->channels;
+ }
+
+- snd_pcm_format_set_silence(runtime->format, buf + offset, size);
++ snd_pcm_format_set_silence(runtime->format, buf + offset, samples);
+
+ return 0;
+ }
+diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
+index d71c59cf7bdd..370b742117ef 100644
+--- a/sound/soc/codecs/adau1701.c
++++ b/sound/soc/codecs/adau1701.c
+@@ -230,8 +230,10 @@ static int adau1701_reg_read(void *context, unsigned int reg,
+
+ *value = 0;
+
+- for (i = 0; i < size; i++)
+- *value |= recv_buf[i] << (i * 8);
++ for (i = 0; i < size; i++) {
++ *value <<= 8;
++ *value |= recv_buf[i];
++ }
+
+ return 0;
+ }
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index f5fccc7a8e89..d97f1ce7ff7d 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -2284,7 +2284,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
+ /* Register for interrupts */
+ dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
+
+- ret = request_threaded_irq(max98090->irq, NULL,
++ ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL,
+ max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "max98090_interrupt", codec);
+ if (ret < 0) {
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index de80e89b5fd8..70679cf14c83 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -2059,6 +2059,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5640 = {
+ static const struct regmap_config rt5640_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
++ .use_single_rw = true,
+
+ .max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) *
+ RT5640_PR_SPACING),
+diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
+index 23419109ecac..1cdae8ccc61b 100644
+--- a/sound/soc/codecs/tlv320aic31xx.c
++++ b/sound/soc/codecs/tlv320aic31xx.c
+@@ -1178,7 +1178,7 @@ static void aic31xx_pdata_from_of(struct aic31xx_priv *aic31xx)
+ }
+ #endif /* CONFIG_OF */
+
+-static void aic31xx_device_init(struct aic31xx_priv *aic31xx)
++static int aic31xx_device_init(struct aic31xx_priv *aic31xx)
+ {
+ int ret, i;
+
+@@ -1197,7 +1197,7 @@ static void aic31xx_device_init(struct aic31xx_priv *aic31xx)
+ "aic31xx-reset-pin");
+ if (ret < 0) {
+ dev_err(aic31xx->dev, "not able to acquire gpio\n");
+- return;
++ return ret;
+ }
+ }
+
+@@ -1210,6 +1210,7 @@ static void aic31xx_device_init(struct aic31xx_priv *aic31xx)
+ if (ret != 0)
+ dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
+
++ return ret;
+ }
+
+ static int aic31xx_i2c_probe(struct i2c_client *i2c,
+@@ -1239,7 +1240,9 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
+
+ aic31xx->pdata.codec_type = id->driver_data;
+
+- aic31xx_device_init(aic31xx);
++ ret = aic31xx_device_init(aic31xx);
++ if (ret)
++ return ret;
+
+ return snd_soc_register_codec(&i2c->dev, &soc_codec_driver_aic31xx,
+ aic31xx_dai_driver,
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 247b39013fba..9719d3ca8e47 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -3505,6 +3505,7 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
++/* Should be called with accdet_lock held */
+ static void wm1811_micd_stop(struct snd_soc_codec *codec)
+ {
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+@@ -3512,14 +3513,10 @@ static void wm1811_micd_stop(struct snd_soc_codec *codec)
+ if (!wm8994->jackdet)
+ return;
+
+- mutex_lock(&wm8994->accdet_lock);
+-
+ snd_soc_update_bits(codec, WM8958_MIC_DETECT_1, WM8958_MICD_ENA, 0);
+
+ wm1811_jackdet_set_mode(codec, WM1811_JACKDET_MODE_JACK);
+
+- mutex_unlock(&wm8994->accdet_lock);
+-
+ if (wm8994->wm8994->pdata.jd_ext_cap)
+ snd_soc_dapm_disable_pin(&codec->dapm,
+ "MICBIAS2");
+@@ -3560,10 +3557,10 @@ static void wm8958_open_circuit_work(struct work_struct *work)
+ open_circuit_work.work);
+ struct device *dev = wm8994->wm8994->dev;
+
+- wm1811_micd_stop(wm8994->hubs.codec);
+-
+ mutex_lock(&wm8994->accdet_lock);
+
++ wm1811_micd_stop(wm8994->hubs.codec);
++
+ dev_dbg(dev, "Reporting open circuit\n");
+
+ wm8994->jack_mic = false;
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 060027182dcb..2537725dd53f 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1758,3 +1758,5 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(wm_adsp2_init);
++
++MODULE_LICENSE("GPL v2");
+diff --git a/sound/soc/intel/sst-baytrail-pcm.c b/sound/soc/intel/sst-baytrail-pcm.c
+index 8eab97368ea7..599401c0c655 100644
+--- a/sound/soc/intel/sst-baytrail-pcm.c
++++ b/sound/soc/intel/sst-baytrail-pcm.c
+@@ -32,7 +32,7 @@ static const struct snd_pcm_hardware sst_byt_pcm_hardware = {
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+- SNDRV_PCM_FORMAT_S24_LE,
++ SNDRV_PCM_FMTBIT_S24_LE,
+ .period_bytes_min = 384,
+ .period_bytes_max = 48000,
+ .periods_min = 2,
+diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
+index 058efb17c568..61bf6da4bb02 100644
+--- a/sound/soc/intel/sst-haswell-pcm.c
++++ b/sound/soc/intel/sst-haswell-pcm.c
+@@ -80,7 +80,7 @@ static const struct snd_pcm_hardware hsw_pcm_hardware = {
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_S24_LE |
++ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = (HSW_PCM_PERIODS_MAX / HSW_PCM_PERIODS_MIN) * PAGE_SIZE,
+@@ -400,7 +400,15 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
+ sst_hsw_stream_set_valid(hsw, pcm_data->stream, 16);
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+- bits = SST_HSW_DEPTH_24BIT;
++ bits = SST_HSW_DEPTH_32BIT;
++ sst_hsw_stream_set_valid(hsw, pcm_data->stream, 24);
++ break;
++ case SNDRV_PCM_FORMAT_S8:
++ bits = SST_HSW_DEPTH_8BIT;
++ sst_hsw_stream_set_valid(hsw, pcm_data->stream, 8);
++ break;
++ case SNDRV_PCM_FORMAT_S32_LE:
++ bits = SST_HSW_DEPTH_32BIT;
+ sst_hsw_stream_set_valid(hsw, pcm_data->stream, 32);
+ break;
+ default:
+@@ -685,8 +693,9 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
+ }
+
+ #define HSW_FORMATS \
+- (SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S16_LE |\
+- SNDRV_PCM_FMTBIT_S32_LE)
++ (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | \
++ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S16_LE |\
++ SNDRV_PCM_FMTBIT_S8)
+
+ static struct snd_soc_dai_driver hsw_dais[] = {
+ {
+@@ -696,7 +705,7 @@ static struct snd_soc_dai_driver hsw_dais[] = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+- .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ .formats = SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ },
+ {
+@@ -727,8 +736,8 @@ static struct snd_soc_dai_driver hsw_dais[] = {
+ .stream_name = "Loopback Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+- .rates = SNDRV_PCM_RATE_8000_192000,
+- .formats = HSW_FORMATS,
++ .rates = SNDRV_PCM_RATE_48000,
++ .formats = SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ },
+ {
+@@ -737,8 +746,8 @@ static struct snd_soc_dai_driver hsw_dais[] = {
+ .stream_name = "Analog Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+- .rates = SNDRV_PCM_RATE_8000_192000,
+- .formats = HSW_FORMATS,
++ .rates = SNDRV_PCM_RATE_48000,
++ .formats = SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ },
+ };
+diff --git a/sound/soc/omap/omap-twl4030.c b/sound/soc/omap/omap-twl4030.c
+index f8a6adc2d81c..4336d1831485 100644
+--- a/sound/soc/omap/omap-twl4030.c
++++ b/sound/soc/omap/omap-twl4030.c
+@@ -260,7 +260,7 @@ static struct snd_soc_dai_link omap_twl4030_dai_links[] = {
+ .stream_name = "TWL4030 Voice",
+ .cpu_dai_name = "omap-mcbsp.3",
+ .codec_dai_name = "twl4030-voice",
+- .platform_name = "omap-mcbsp.2",
++ .platform_name = "omap-mcbsp.3",
+ .codec_name = "twl4030-codec",
+ .dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
+diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
+index 199a8b377553..a8e097433074 100644
+--- a/sound/soc/pxa/pxa-ssp.c
++++ b/sound/soc/pxa/pxa-ssp.c
+@@ -723,7 +723,8 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
+ ssp_handle = of_parse_phandle(dev->of_node, "port", 0);
+ if (!ssp_handle) {
+ dev_err(dev, "unable to get 'port' phandle\n");
+- return -ENODEV;
++ ret = -ENODEV;
++ goto err_priv;
+ }
+
+ priv->ssp = pxa_ssp_request_of(ssp_handle, "SoC audio");
+@@ -764,9 +765,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai)
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+
+-#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+- SNDRV_PCM_FMTBIT_S24_LE | \
+- SNDRV_PCM_FMTBIT_S32_LE)
++#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+ static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
+ .startup = pxa_ssp_startup,
+diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
+index 2ac76fa3e742..5f9b255a8b38 100644
+--- a/sound/soc/samsung/i2s.c
++++ b/sound/soc/samsung/i2s.c
+@@ -920,11 +920,9 @@ static int i2s_suspend(struct snd_soc_dai *dai)
+ {
+ struct i2s_dai *i2s = to_info(dai);
+
+- if (dai->active) {
+- i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
+- i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
+- i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
+- }
++ i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
++ i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
++ i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
+
+ return 0;
+ }
+@@ -933,11 +931,9 @@ static int i2s_resume(struct snd_soc_dai *dai)
+ {
+ struct i2s_dai *i2s = to_info(dai);
+
+- if (dai->active) {
+- writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
+- writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
+- writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
+- }
++ writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
++ writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
++ writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
+
+ return 0;
+ }
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 54d18f22a33e..4ea656770d65 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -2069,6 +2069,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
+ dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ }
+
++ dpcm_path_put(&list);
+ capture:
+ /* skip if FE doesn't have capture capability */
+ if (!fe->cpu_dai->driver->capture.channels_min)
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index e66e710cc595..0a8a9db43d34 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -4,6 +4,7 @@ TARGETS += efivarfs
+ TARGETS += kcmp
+ TARGETS += memory-hotplug
+ TARGETS += mqueue
++TARGETS += mount
+ TARGETS += net
+ TARGETS += ptrace
+ TARGETS += timers
+diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile
+new file mode 100644
+index 000000000000..337d853c2b72
+--- /dev/null
++++ b/tools/testing/selftests/mount/Makefile
+@@ -0,0 +1,17 @@
++# Makefile for mount selftests.
++
++all: unprivileged-remount-test
++
++unprivileged-remount-test: unprivileged-remount-test.c
++ gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test
++
++# Allow specific tests to be selected.
++test_unprivileged_remount: unprivileged-remount-test
++ @if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
++
++run_tests: all test_unprivileged_remount
++
++clean:
++ rm -f unprivileged-remount-test
++
++.PHONY: all test_unprivileged_remount
+diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
+new file mode 100644
+index 000000000000..1b3ff2fda4d0
+--- /dev/null
++++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
+@@ -0,0 +1,242 @@
++#define _GNU_SOURCE
++#include <sched.h>
++#include <stdio.h>
++#include <errno.h>
++#include <string.h>
++#include <sys/types.h>
++#include <sys/mount.h>
++#include <sys/wait.h>
++#include <stdlib.h>
++#include <unistd.h>
++#include <fcntl.h>
++#include <grp.h>
++#include <stdbool.h>
++#include <stdarg.h>
++
++#ifndef CLONE_NEWNS
++# define CLONE_NEWNS 0x00020000
++#endif
++#ifndef CLONE_NEWUTS
++# define CLONE_NEWUTS 0x04000000
++#endif
++#ifndef CLONE_NEWIPC
++# define CLONE_NEWIPC 0x08000000
++#endif
++#ifndef CLONE_NEWNET
++# define CLONE_NEWNET 0x40000000
++#endif
++#ifndef CLONE_NEWUSER
++# define CLONE_NEWUSER 0x10000000
++#endif
++#ifndef CLONE_NEWPID
++# define CLONE_NEWPID 0x20000000
++#endif
++
++#ifndef MS_RELATIME
++#define MS_RELATIME (1 << 21)
++#endif
++#ifndef MS_STRICTATIME
++#define MS_STRICTATIME (1 << 24)
++#endif
++
++static void die(char *fmt, ...)
++{
++ va_list ap;
++ va_start(ap, fmt);
++ vfprintf(stderr, fmt, ap);
++ va_end(ap);
++ exit(EXIT_FAILURE);
++}
++
++static void write_file(char *filename, char *fmt, ...)
++{
++ char buf[4096];
++ int fd;
++ ssize_t written;
++ int buf_len;
++ va_list ap;
++
++ va_start(ap, fmt);
++ buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
++ va_end(ap);
++ if (buf_len < 0) {
++ die("vsnprintf failed: %s\n",
++ strerror(errno));
++ }
++ if (buf_len >= sizeof(buf)) {
++ die("vsnprintf output truncated\n");
++ }
++
++ fd = open(filename, O_WRONLY);
++ if (fd < 0) {
++ die("open of %s failed: %s\n",
++ filename, strerror(errno));
++ }
++ written = write(fd, buf, buf_len);
++ if (written != buf_len) {
++ if (written >= 0) {
++ die("short write to %s\n", filename);
++ } else {
++ die("write to %s failed: %s\n",
++ filename, strerror(errno));
++ }
++ }
++ if (close(fd) != 0) {
++ die("close of %s failed: %s\n",
++ filename, strerror(errno));
++ }
++}
++
++static void create_and_enter_userns(void)
++{
++ uid_t uid;
++ gid_t gid;
++
++ uid = getuid();
++ gid = getgid();
++
++ if (unshare(CLONE_NEWUSER) !=0) {
++ die("unshare(CLONE_NEWUSER) failed: %s\n",
++ strerror(errno));
++ }
++
++ write_file("/proc/self/uid_map", "0 %d 1", uid);
++ write_file("/proc/self/gid_map", "0 %d 1", gid);
++
++ if (setgroups(0, NULL) != 0) {
++ die("setgroups failed: %s\n",
++ strerror(errno));
++ }
++ if (setgid(0) != 0) {
++ die ("setgid(0) failed %s\n",
++ strerror(errno));
++ }
++ if (setuid(0) != 0) {
++ die("setuid(0) failed %s\n",
++ strerror(errno));
++ }
++}
++
++static
++bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
++{
++ pid_t child;
++
++ child = fork();
++ if (child == -1) {
++ die("fork failed: %s\n",
++ strerror(errno));
++ }
++ if (child != 0) { /* parent */
++ pid_t pid;
++ int status;
++ pid = waitpid(child, &status, 0);
++ if (pid == -1) {
++ die("waitpid failed: %s\n",
++ strerror(errno));
++ }
++ if (pid != child) {
++ die("waited for %d got %d\n",
++ child, pid);
++ }
++ if (!WIFEXITED(status)) {
++ die("child did not terminate cleanly\n");
++ }
++ return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
++ }
++
++ create_and_enter_userns();
++ if (unshare(CLONE_NEWNS) != 0) {
++ die("unshare(CLONE_NEWNS) failed: %s\n",
++ strerror(errno));
++ }
++
++ if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
++ die("mount of /tmp failed: %s\n",
++ strerror(errno));
++ }
++
++ create_and_enter_userns();
++
++ if (unshare(CLONE_NEWNS) != 0) {
++ die("unshare(CLONE_NEWNS) failed: %s\n",
++ strerror(errno));
++ }
++
++ if (mount("/tmp", "/tmp", "none",
++ MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) {
++ /* system("cat /proc/self/mounts"); */
++ die("remount of /tmp failed: %s\n",
++ strerror(errno));
++ }
++
++ if (mount("/tmp", "/tmp", "none",
++ MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) {
++ /* system("cat /proc/self/mounts"); */
++ die("remount of /tmp with invalid flags "
++ "succeeded unexpectedly\n");
++ }
++ exit(EXIT_SUCCESS);
++}
++
++static bool test_unpriv_remount_simple(int mount_flags)
++{
++ return test_unpriv_remount(mount_flags, mount_flags, 0);
++}
++
++static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
++{
++ return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
++}
++
++int main(int argc, char **argv)
++{
++ if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
++ die("MS_RDONLY malfunctions\n");
++ }
++ if (!test_unpriv_remount_simple(MS_NODEV)) {
++ die("MS_NODEV malfunctions\n");
++ }
++ if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
++ die("MS_NOSUID malfunctions\n");
++ }
++ if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
++ die("MS_NOEXEC malfunctions\n");
++ }
++ if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
++ MS_NOATIME|MS_NODEV))
++ {
++ die("MS_RELATIME malfunctions\n");
++ }
++ if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
++ MS_NOATIME|MS_NODEV))
++ {
++ die("MS_STRICTATIME malfunctions\n");
++ }
++ if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
++ MS_STRICTATIME|MS_NODEV))
++ {
++ die("MS_RELATIME malfunctions\n");
++ }
++ if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
++ MS_NOATIME|MS_NODEV))
++ {
++ die("MS_RELATIME malfunctions\n");
++ }
++ if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
++ MS_NOATIME|MS_NODEV))
++ {
++ die("MS_RELATIME malfunctions\n");
++ }
++ if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
++ MS_STRICTATIME|MS_NODEV))
++ {
++ die("MS_RELATIME malfunctions\n");
++ }
++ if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
++ MS_NOATIME|MS_NODEV))
++ {
++ die("Default atime malfunctions\n");
++ }
++ return EXIT_SUCCESS;
++}
diff --git a/series.conf b/series.conf
index 2cf5e95ab7..ad84557a21 100644
--- a/series.conf
+++ b/series.conf
@@ -29,6 +29,7 @@
########################################################
patches.kernel.org/patch-3.16.1
patches.kernel.org/patch-3.16.1-2
+ patches.kernel.org/patch-3.16.2-3
########################################################
# Build fixes that apply to the vanilla kernel too.
@@ -260,7 +261,6 @@
########################################################
# Reiserfs Patches
########################################################
- patches.fixes/reiserfs-fix-corruption-introduced-by-balance_leaf-refactor
########################################################
# dlm
@@ -402,7 +402,6 @@
########################################################
# USB
########################################################
- patches.fixes/rtsx_usb-export-device-table
########################################################
# I2C